column  type           min         max
patch   stringlengths  17          31.2k
y       int64          1           1
oldf    stringlengths  0           2.21M
idx     int64          1           1
id      int64          4.29k       68.4k
msg     stringlengths  8           843
proj    stringclasses  212 values
lang    stringclasses  9 values
@@ -30,8 +30,9 @@ func NewGlobalOpts() *GlobalOpts { bindProjectName() return &GlobalOpts{ - projectName: viper.GetString(projectFlag), - prompt: prompt.New(), + // Leave the projectName as empty in case it's overwritten by a global flag. + // See https://github.com/aws/amazon-ecs-cli-v2/issues/570#issuecomment-569133741 + prompt: prompt.New(), } }
1
// Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package cli contains the ecs-preview subcommands. package cli import ( "fmt" "os" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/archer" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/aws/ecr" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/color" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/command" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/term/prompt" "github.com/aws/amazon-ecs-cli-v2/internal/pkg/workspace" "github.com/aws/aws-sdk-go/aws/session" "github.com/spf13/cobra" "github.com/spf13/viper" ) // GlobalOpts holds fields that are used across multiple commands. type GlobalOpts struct { projectName string prompt prompter } // NewGlobalOpts returns a GlobalOpts with the project name retrieved from viper. func NewGlobalOpts() *GlobalOpts { bindProjectName() return &GlobalOpts{ projectName: viper.GetString(projectFlag), prompt: prompt.New(), } } // ProjectName returns the project name. // If the name is empty, it caches it after querying viper. func (o *GlobalOpts) ProjectName() string { if o.projectName != "" { return o.projectName } o.projectName = viper.GetString(projectFlag) return o.projectName } // actionCommand is the interface that every command that creates a resource implements. type actionCommand interface { Ask() error Validate() error Execute() error RecommendedActions() []string } // bindProjectName loads the project's name to viper. // If there is an error, we swallow the error and leave the default value as empty string. func bindProjectName() { name, err := loadProjectName() if err != nil { return } viper.SetDefault(projectFlag, name) } // loadProjectName retrieves the project's name from the workspace if it exists and returns it. // If there is an error, it returns an empty string and the error. func loadProjectName() (string, error) { // Load the workspace and set the project flag. ws, err := workspace.New() if err != nil { // If there's an error fetching the workspace, fall back to requiring // the project flag be set. return "", fmt.Errorf("fetching workspace: %w", err) } summary, err := ws.Summary() if err != nil { // If there's an error reading from the workspace, fall back to requiring // the project flag be set. return "", fmt.Errorf("reading from workspace: %w", err) } return summary.ProjectName, nil } type errReservedArg struct { val string } func (e *errReservedArg) Error() string { return fmt.Sprintf(`argument %s is a reserved keyword, please use a different value`, color.HighlightUserInput(e.val)) } // reservedArgs returns an error if the arguments contain any reserved keywords. func reservedArgs(cmd *cobra.Command, args []string) error { if len(args) != 1 { return nil } if args[0] == "local" { return &errReservedArg{val: "local"} } return nil } // runCmdE wraps one of the run error methods, PreRunE, RunE, of a cobra command so that if a user // types "help" in the arguments the usage string is printed instead of running the command. func runCmdE(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { if len(args) == 1 && args[0] == "help" { _ = cmd.Help() // Help always returns nil. 
os.Exit(0) } return f(cmd, args) } } type projectService interface { archer.ProjectStore archer.EnvironmentStore archer.ApplicationStore } type ecrService interface { GetRepository(name string) (string, error) GetECRAuth() (ecr.Auth, error) } type dockerService interface { Build(uri, tag, path string) error Login(uri, username, password string) error Push(uri, tag string) error } type runner interface { Run(name string, args []string, options ...command.Option) error } type defaultSessionProvider interface { Default() (*session.Session, error) } type regionalSessionProvider interface { DefaultWithRegion(region string) (*session.Session, error) } type sessionFromRoleProvider interface { FromRole(roleARN string, region string) (*session.Session, error) } type sessionProvider interface { defaultSessionProvider regionalSessionProvider sessionFromRoleProvider }
1
11,582
Since we no longer initialize `opts.projectName` with `opts.ProjectName()`, `opts.projectName` and `opts.ProjectName()` are not necessarily equal to each other, so we also need to substitute all usages of `opts.ProjectName()` within `Ask()`, `Validate()`, and `Execute()` with `opts.projectName`. Otherwise, if `-p` is set, the value cached by `opts.ProjectName()` will go stale.
aws-copilot-cli
go
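A minimal sketch of the staleness concern in the comment above, with a plain map standing in for viper and all of the surrounding wiring invented for illustration:

    package main

    import "fmt"

    // store stands in for viper: flag parsing may update it after construction.
    var store = map[string]string{"project": ""}

    type GlobalOpts struct{ projectName string }

    // ProjectName caches the first non-empty lookup, so a later `-p` override
    // written into the store is never observed once the cache is populated.
    func (o *GlobalOpts) ProjectName() string {
        if o.projectName != "" {
            return o.projectName
        }
        o.projectName = store["project"]
        return o.projectName
    }

    func main() {
        opts := &GlobalOpts{}
        store["project"] = "from-workspace"
        fmt.Println(opts.ProjectName()) // from-workspace (now cached)
        store["project"] = "from-p-flag" // simulates -p being parsed later
        fmt.Println(opts.ProjectName()) // still from-workspace: the cache is stale
    }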
@@ -531,7 +531,7 @@ namespace Nethermind.Core.Extensions [DebuggerStepThrough] public static string ByteArrayToHexViaLookup32Safe(byte[] bytes, bool withZeroX) { - if (bytes.Length == 0) + if (bytes?.Length == 0 || bytes == null) { return withZeroX ? "0x" : ""; }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using System; using System.Buffers.Binary; using System.Collections; using System.Collections.Generic; using System.ComponentModel.Design; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.IO; using System.Numerics; using System.Reflection.Metadata; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Text; using Nethermind.Core.Crypto; using Nethermind.Dirichlet.Numerics; namespace Nethermind.Core.Extensions { public static unsafe partial class Bytes { public static readonly IEqualityComparer<byte[]> EqualityComparer = new BytesEqualityComparer(); public static readonly IComparer<byte[]> Comparer = new BytesComparer(); private class BytesEqualityComparer : EqualityComparer<byte[]> { public override bool Equals(byte[] x, byte[] y) { return AreEqual(x, y); } public override int GetHashCode(byte[] obj) { return obj.GetSimplifiedHashCode(); } } private class BytesComparer : Comparer<byte[]> { public override int Compare(byte[] x, byte[] y) { if (x == null) { return y == null ? 0 : 1; } if (y == null) { return -1; } if (x.Length == 0) { return y.Length == 0 ? 0 : 1; } for (int i = 0; i < x.Length; i++) { if (y.Length <= i) { return -1; } int result = x[i].CompareTo(y[i]); if (result != 0) { return result; } } return y.Length > x.Length ? 1 : 0; } } public static readonly byte[] Zero32 = new byte[32]; public static readonly byte[] Empty = new byte[0]; [MethodImpl(MethodImplOptions.AggressiveInlining)] public static bool GetBit(this byte b, int bitNumber) { return (b & (1 << (7 - bitNumber))) != 0; } [MethodImpl(MethodImplOptions.AggressiveInlining)] public static void SetBit(this ref byte b, int bitNumber) { byte mask = (byte) (1 << (7 - bitNumber)); b = b |= mask; } public static int GetHighestSetBitIndex(this byte b) { if ((b & 128) == 128) return 8; if ((b & 64) == 64) return 7; if ((b & 32) == 32) return 6; if ((b & 16) == 16) return 5; if ((b & 8) == 8) return 4; if ((b & 4) == 4) return 3; return (b & 2) == 2 ? 
2 : b; } public static bool AreEqual(Span<byte> a1, Span<byte> a2) { // this works for nulls return a1.SequenceEqual(a2); } public static bool IsZero(this byte[] bytes) { if (bytes.Length == 32) { return bytes[31] == 0 && bytes.AsSpan().SequenceEqual(Zero32); } for (int i = 0; i < bytes.Length / 2; i++) { if (bytes[i] != 0) { return false; } if (bytes[bytes.Length - i - 1] != 0) { return false; } } return bytes.Length % 2 == 0 || bytes[bytes.Length / 2] == 0; } public static int LeadingZerosCount(this Span<byte> bytes, int startIndex = 0) { for (int i = startIndex; i < bytes.Length; i++) { if (bytes[i] != 0) { return i - startIndex; } } return bytes.Length - startIndex; } public static int TrailingZerosCount(this byte[] bytes) { for (int i = 0; i < bytes.Length; i++) { if (bytes[bytes.Length - i - 1] != 0) { return i; } } return bytes.Length; } public static Span<byte> WithoutLeadingZeros(this byte[] bytes) { return bytes.AsSpan().WithoutLeadingZeros(); } public static Span<byte> WithoutLeadingZeros(this Span<byte> bytes) { for (int i = 0; i < bytes.Length; i++) { if (bytes[i] != 0) { return bytes.Slice(i, bytes.Length - i); } } return new byte[] {0}; } public static byte[] Concat(byte prefix, byte[] bytes) { byte[] result = new byte[1 + bytes.Length]; result[0] = prefix; Buffer.BlockCopy(bytes, 0, result, 1, bytes.Length); return result; } public static byte[] PadLeft(this byte[] bytes, int length, byte padding = 0) { return PadLeft(bytes.AsSpan(), length, padding); } public static byte[] PadLeft(this Span<byte> bytes, int length, byte padding = 0) { if (bytes.Length == length) { return bytes.ToArray(); } if (bytes.Length > length) { return bytes.Slice(0, length).ToArray(); } byte[] result = new byte[length]; bytes.CopyTo(result.AsSpan().Slice(length - bytes.Length)); if (padding != 0) { for (int i = 0; i < length - bytes.Length; i++) { result[i] = padding; } } return result; } public static byte[] PadRight(this byte[] bytes, int length) { if (bytes.Length == length) { return (byte[]) bytes.Clone(); } if (bytes.Length > length) { return bytes.Slice(0, length); } byte[] result = new byte[length]; Buffer.BlockCopy(bytes, 0, result, 0, bytes.Length); return result; } public static byte[] Concat(params byte[][] parts) { int totalLength = 0; for (int i = 0; i < parts.Length; i++) { totalLength += parts[i].Length; } byte[] result = new byte[totalLength]; int position = 0; for (int i = 0; i < parts.Length; i++) { Buffer.BlockCopy(parts[i], 0, result, position, parts[i].Length); position += parts[i].Length; } return result; } public static byte[] Concat(byte[] bytes, byte suffix) { byte[] result = new byte[bytes.Length + 1]; result[^1] = suffix; Buffer.BlockCopy(bytes, 0, result, 0, bytes.Length); return result; } public static byte[] Reverse(byte[] bytes) { byte[] result = new byte[bytes.Length]; for (int i = 0; i < bytes.Length; i++) { result[i] = bytes[bytes.Length - i - 1]; } return result; } public static void ReverseInPlace(byte[] bytes) { for (int i = 0; i < bytes.Length / 2; i++) { (bytes[i], bytes[bytes.Length - i - 1]) = (bytes[bytes.Length - i - 1], bytes[i]); } } public static BigInteger ToUnsignedBigInteger(this byte[] bytes) { return ToUnsignedBigInteger(bytes.AsSpan()); } public static BigInteger ToUnsignedBigInteger(this Span<byte> bytes) { return ToUnsignedBigInteger((ReadOnlySpan<byte>) bytes); } public static BigInteger ToUnsignedBigInteger(this ReadOnlySpan<byte> bytes) { return new BigInteger(bytes, true, true); } public static uint ReadEthUInt32(this Span<byte> bytes) { 
return ReadEthUInt32((ReadOnlySpan<byte>) bytes); } public static uint ReadEthUInt32(this ReadOnlySpan<byte> bytes) { if (bytes.Length > 4) { bytes = bytes.Slice(bytes.Length - 4, 4); } if (bytes.Length == 4) { return BinaryPrimitives.ReadUInt32BigEndian(bytes); } Span<byte> fourBytes = stackalloc byte[4]; bytes.CopyTo(fourBytes.Slice(4 - bytes.Length)); return BinaryPrimitives.ReadUInt32BigEndian(fourBytes); } public static uint ReadEthUInt32LittleEndian(this Span<byte> bytes) { if (bytes.Length > 4) { bytes = bytes.Slice(bytes.Length - 4, 4); } if (bytes.Length == 4) { return BinaryPrimitives.ReadUInt32LittleEndian(bytes); } Span<byte> fourBytes = stackalloc byte[4]; bytes.CopyTo(fourBytes.Slice(4 - bytes.Length)); return BinaryPrimitives.ReadUInt32LittleEndian(fourBytes); } public static int ReadEthInt32(this Span<byte> bytes) { return ReadEthInt32((ReadOnlySpan<byte>) bytes); } public static int ReadEthInt32(this ReadOnlySpan<byte> bytes) { if (bytes.Length > 4) { bytes = bytes.Slice(bytes.Length - 4, 4); } if (bytes.Length == 4) { return BinaryPrimitives.ReadInt32BigEndian(bytes); } Span<byte> fourBytes = stackalloc byte[4]; bytes.CopyTo(fourBytes.Slice(4 - bytes.Length)); return BinaryPrimitives.ReadInt32BigEndian(fourBytes); } public static ulong ReadEthUInt64(this Span<byte> bytes) { return ReadEthUInt64((ReadOnlySpan<byte>) bytes); } public static ulong ReadEthUInt64(this ReadOnlySpan<byte> bytes) { if (bytes.Length > 8) { bytes = bytes.Slice(bytes.Length - 8, 8); } if (bytes.Length == 8) { return BinaryPrimitives.ReadUInt64BigEndian(bytes); } Span<byte> eightBytes = stackalloc byte[8]; bytes.CopyTo(eightBytes.Slice(8 - bytes.Length)); return BinaryPrimitives.ReadUInt64BigEndian(eightBytes); } public static BigInteger ToSignedBigInteger(this byte[] bytes, int byteLength) { if (bytes.Length == byteLength) { return new BigInteger(bytes.AsSpan(), false, true); } Debug.Assert(bytes.Length <= byteLength, $"{nameof(ToSignedBigInteger)} expects {nameof(byteLength)} parameter to be less than length of the {bytes}"); bool needToExpand = bytes.Length != byteLength; byte[] bytesToUse = needToExpand ? new byte[byteLength] : bytes; if (needToExpand) { Buffer.BlockCopy(bytes, 0, bytesToUse, byteLength - bytes.Length, bytes.Length); } byte[] signedResult = new byte[byteLength]; for (int i = 0; i < byteLength; i++) { signedResult[byteLength - i - 1] = bytesToUse[i]; } return new BigInteger(signedResult); } public static UInt256 ToUInt256(this byte[] bytes) { UInt256.CreateFromBigEndian(out UInt256 result, bytes); return result; } private static byte Reverse(byte b) { b = (byte) ((b & 0xF0) >> 4 | (b & 0x0F) << 4); b = (byte) ((b & 0xCC) >> 2 | (b & 0x33) << 2); b = (byte) ((b & 0xAA) >> 1 | (b & 0x55) << 1); return b; } public static byte[] ToBytes(this BitArray bits) { if (bits.Length % 8 != 0) { throw new ArgumentException(nameof(bits)); } byte[] bytes = new byte[bits.Length / 8]; bits.CopyTo(bytes, 0); for (int i = 0; i < bytes.Length; i++) { bytes[i] = Reverse(bytes[i]); } return bytes; } public static string ToBitString(this BitArray bits) { var sb = new StringBuilder(); for (int i = 0; i < bits.Count; i++) { char c = bits[i] ? 
'1' : '0'; sb.Append(c); } return sb.ToString(); } public static BitArray ToBigEndianBitArray256(this Span<byte> bytes) { byte[] inverted = new byte[32]; int startIndex = 32 - bytes.Length; for (int i = startIndex; i < inverted.Length; i++) { inverted[i] = Reverse(bytes[i - startIndex]); } return new BitArray(inverted); } public static string ToHexString(this byte[] bytes) { return ToHexString(bytes, false, false, false); } public static void StreamHex(this byte[] bytes, StreamWriter streamWriter) { for (int i = 0; i < bytes.Length; i++) { uint val = Lookup32[bytes[i]]; streamWriter.Write((char) val); streamWriter.Write((char) (val >> 16)); } } public static string ToHexString(this byte[] bytes, bool withZeroX) { return ToHexString(bytes, withZeroX, false, false); } public static string ToHexString(this byte[] bytes, bool withZeroX, bool noLeadingZeros) { return ToHexString(bytes, withZeroX, noLeadingZeros, false); } public static string ToHexString(this byte[] bytes, bool withZeroX, bool noLeadingZeros, bool withEip55Checksum) { return ByteArrayToHexViaLookup32(bytes, withZeroX, noLeadingZeros, withEip55Checksum); } private struct StateSmall { public StateSmall(byte[] bytes, bool withZeroX) { Bytes = bytes; WithZeroX = withZeroX; } public byte[] Bytes; public bool WithZeroX; } private struct State { public State(byte[] bytes, int leadingZeros, bool withZeroX, bool withEip55Checksum) { Bytes = bytes; LeadingZeros = leadingZeros; WithZeroX = withZeroX; WithEip55Checksum = withEip55Checksum; } public int LeadingZeros; public byte[] Bytes; public bool WithZeroX; public bool WithEip55Checksum; } [DebuggerStepThrough] public static string ByteArrayToHexViaLookup32Safe(byte[] bytes, bool withZeroX) { if (bytes.Length == 0) { return withZeroX ? "0x" : ""; } int length = bytes.Length * 2 + (withZeroX ? 2 : 0); StateSmall stateToPass = new StateSmall(bytes, withZeroX); return string.Create(length, stateToPass, (chars, state) => { ref var charsRef = ref MemoryMarshal.GetReference(chars); if (state.WithZeroX) { charsRef = '0'; Unsafe.Add(ref charsRef, 1) = 'x'; charsRef = ref Unsafe.Add(ref charsRef, 2); } ref var input = ref state.Bytes[0]; ref var output = ref Unsafe.As<char, uint>(ref charsRef); int toProcess = state.Bytes.Length; var lookup32 = Lookup32; while (toProcess > 8) { output = lookup32[input]; Unsafe.Add(ref output, 1) = lookup32[Unsafe.Add(ref input, 1)]; Unsafe.Add(ref output, 2) = lookup32[Unsafe.Add(ref input, 2)]; Unsafe.Add(ref output, 3) = lookup32[Unsafe.Add(ref input, 3)]; Unsafe.Add(ref output, 4) = lookup32[Unsafe.Add(ref input, 4)]; Unsafe.Add(ref output, 5) = lookup32[Unsafe.Add(ref input, 5)]; Unsafe.Add(ref output, 6) = lookup32[Unsafe.Add(ref input, 6)]; Unsafe.Add(ref output, 7) = lookup32[Unsafe.Add(ref input, 7)]; output = ref Unsafe.Add(ref output, 8); input = ref Unsafe.Add(ref input, 8); toProcess -= 8; } while (toProcess > 0) { output = lookup32[input]; output = ref Unsafe.Add(ref output, 1); input = ref Unsafe.Add(ref input, 1); toProcess -= 1; } }); } [DebuggerStepThrough] private static string ByteArrayToHexViaLookup32(byte[] bytes, bool withZeroX, bool skipLeadingZeros, bool withEip55Checksum) { int leadingZerosFirstCheck = skipLeadingZeros ? CountLeadingZeros(bytes) : 0; int length = bytes.Length * 2 + (withZeroX ? 2 : 0) - leadingZerosFirstCheck; if (skipLeadingZeros && length == (withZeroX ? 2 : 0)) { return withZeroX ? 
"0x0" : "0"; } State stateToPass = new State(bytes, leadingZerosFirstCheck, withZeroX, withEip55Checksum); return string.Create(length, stateToPass, (chars, state) => { string hashHex = null; if (state.WithEip55Checksum) { // this path is rarely used - only in wallets hashHex = Keccak.Compute(state.Bytes.ToHexString(false)).ToString(false); } int offset0x = 0; if (state.WithZeroX) { chars[0] = '0'; chars[1] = 'x'; offset0x += 2; } bool odd = state.LeadingZeros % 2 == 1; int oddity = odd ? 1 : 0; int charsLength = chars.Length; for (int i = offset0x; i < charsLength; i += 2) { uint val = Lookup32[state.Bytes[(i - offset0x + state.LeadingZeros) / 2]]; if (i != offset0x || !odd) { char char1 = (char) val; chars[i - oddity] = state.WithEip55Checksum && char.IsLetter(char1) && hashHex[i - offset0x] > '7' ? char.ToUpper(char1) : char1; } char char2 = (char) (val >> 16); chars[i + 1 - oddity] = state.WithEip55Checksum && char.IsLetter(char2) && hashHex[i + 1 - offset0x] > '7' ? char.ToUpper(char2) : char2; } }); } private static uint[] Lookup32 = CreateLookup32("x2"); private static uint[] CreateLookup32(string format) { uint[] result = new uint[256]; for (int i = 0; i < 256; i++) { string s = i.ToString(format); result[i] = s[0] + ((uint) s[1] << 16); } return result; } private static int CountLeadingZeros(byte[] bytes) { int leadingZeros = 0; for (int i = 0; i < bytes.Length; i++) { if ((bytes[i] & 240) == 0) { leadingZeros++; if ((bytes[i] & 15) == 0) { leadingZeros++; } else { break; } } else { break; } } return leadingZeros; } [DebuggerStepThrough] public static byte[] FromHexStringOld(string hexString) { if (hexString == null) { throw new ArgumentNullException($"{nameof(hexString)}"); } int startIndex = hexString.StartsWith("0x") ? 2 : 0; if (hexString.Length % 2 == 1) { hexString = hexString.Insert(startIndex, "0"); } int numberChars = hexString.Length - startIndex; byte[] bytes = new byte[numberChars / 2]; for (int i = 0; i < numberChars; i += 2) { bytes[i / 2] = Convert.ToByte(hexString.Substring(i + startIndex, 2), 16); } return bytes; } private static byte[] FromHexNibble1Table = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 255, 255, 255, 255, 255, 255, 255, 160, 176, 192, 208, 224, 240, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 160, 176, 192, 208, 224, 240 }; private static byte[] FromHexNibble2Table = { 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 10, 11, 12, 13, 14, 15 }; [DebuggerStepThrough] public static byte[] FromHexString(string hexString) { if (hexString == null) { throw new ArgumentNullException($"{nameof(hexString)}"); } int startIndex = hexString.StartsWith("0x") ? 2 : 0; bool odd = hexString.Length % 2 == 1; int numberChars = hexString.Length - startIndex + (odd ? 
1 : 0); byte[] bytes = new byte[numberChars / 2]; for (int i = 0; i < numberChars; i += 2) { if (odd && i == 0) { bytes[0] += FromHexNibble2Table[(byte) hexString[startIndex]]; } else if (odd) { bytes[i / 2] += FromHexNibble1Table[(byte) hexString[i + startIndex - 1]]; bytes[i / 2] += FromHexNibble2Table[(byte) hexString[i + startIndex]]; } else { bytes[i / 2] += FromHexNibble1Table[(byte) hexString[i + startIndex]]; bytes[i / 2] += FromHexNibble2Table[(byte) hexString[i + startIndex + 1]]; } } return bytes; } [SuppressMessage("ReSharper", "NonReadonlyMemberInGetHashCode")] public static int GetSimplifiedHashCode(this byte[] bytes) { const int fnvPrime = 0x01000193; if (bytes.Length == 0) { return 0; } return (fnvPrime * bytes.Length * (((fnvPrime * (bytes[0] + 7)) ^ (bytes[^1] + 23)) + 11)) ^ (bytes[(bytes.Length - 1) / 2] + 53); } [SuppressMessage("ReSharper", "NonReadonlyMemberInGetHashCode")] public static int GetSimplifiedHashCode(this Span<byte> bytes) { const int fnvPrime = 0x01000193; if (bytes.Length == 0) { return 0; } return (fnvPrime * bytes.Length * (((fnvPrime * (bytes[0] + 7)) ^ (bytes[^1] + 23)) + 11)) ^ (bytes[(bytes.Length - 1) / 2] + 53); } public static void ChangeEndianness8(Span<byte> bytes) { if (bytes.Length % 16 != 0) { throw new NotImplementedException("Has to be a multiple of 16"); } Span<ulong> ulongs = MemoryMarshal.Cast<byte, ulong>(bytes); for (int i = 0; i < ulongs.Length / 2; i++) { ulong ith = ulongs[i]; ulong endIth = ulongs[^(i + 1)]; (ulongs[i], ulongs[^(i + 1)]) = (BinaryPrimitives.ReverseEndianness(endIth), BinaryPrimitives.ReverseEndianness(ith)); } } } }
1
24,274
Why do we think that an empty string is OK to return from this method? I think null should never be passed in here in the first place.
NethermindEth-nethermind
.cs
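A sketch of the alternative the reviewer implies: reject null up front instead of mapping it to "0x"/"". The helper name and the slow hex conversion are invented for illustration; only the guard clauses mirror the patch. (The patched condition also works, since `bytes?.Length` is null when `bytes` is null, but checking `bytes == null` first reads more directly.)

    using System;

    static class HexSketch
    {
        public static string ToHexSafe(byte[] bytes, bool withZeroX)
        {
            // Treat null as a caller bug rather than silently returning "".
            if (bytes == null)
            {
                throw new ArgumentNullException(nameof(bytes));
            }

            if (bytes.Length == 0)
            {
                return withZeroX ? "0x" : "";
            }

            // Simple reference conversion; the real code uses a lookup table.
            return (withZeroX ? "0x" : "") +
                   BitConverter.ToString(bytes).Replace("-", "").ToLowerInvariant();
        }
    }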
@@ -142,15 +142,12 @@ axe.utils.getNodeFromTree = function(vNode, node) { return vNode; } vNode.children.forEach(candidate => { - var retVal; - - if (candidate.actualNode === node) { + if (found) { + return; + } else if (candidate.actualNode === node) { found = candidate; } else { - retVal = axe.utils.getNodeFromTree(candidate, node); - if (retVal) { - found = retVal; - } + found = axe.utils.getNodeFromTree(candidate, node); } }); return found;
1
/*eslint no-use-before-define: 0*/ var axe = axe || { utils: {} }; /** * This implemnts the flatten-tree algorithm specified: * Originally here https://drafts.csswg.org/css-scoping/#flat-tree * Hopefully soon published here: https://www.w3.org/TR/css-scoping-1/#flat-tree * * Some notable information: ******* NOTE: as of Chrome 59, this is broken in Chrome so that tests fail completely ******* removed functionality for now * 1. <slot> elements do not have boxes by default (i.e. they do not get rendered and * their CSS properties are ignored) * 2. <slot> elements can be made to have a box by overriding the display property * which is 'contents' by default * 3. Even boxed <slot> elements do not show up in the accessibility tree until * they have a tabindex applied to them OR they have a role applied to them AND * they have a box (this is observed behavior in Safari on OS X, I cannot find * the spec for this) */ /** * Wrap the real node and provide list of the flattened children * * @param node {Node} - the node in question * @param shadowId {String} - the ID of the shadow DOM to which this node belongs * @return {Object} - the wrapped node */ function virtualDOMfromNode(node, shadowId) { return { shadowId: shadowId, children: [], actualNode: node }; } /** * find all the fallback content for a <slot> and return these as an array * this array will also include any #text nodes * * @param node {Node} - the slot Node * @return Array{Nodes} */ function getSlotChildren(node) { var retVal = []; node = node.firstChild; while (node) { retVal.push(node); node = node.nextSibling; } return retVal; } /** * Recursvely returns an array of the virtual DOM nodes at this level * excluding comment nodes and the shadow DOM nodes <content> and <slot> * * @param {Node} node the current node * @param {String} shadowId, optional ID of the shadow DOM that is the closest shadow * ancestor of the node */ axe.utils.getFlattenedTree = function(node, shadowId) { // using a closure here and therefore cannot easily refactor toreduce the statements /*eslint max-statements: ["error", 31] */ var retVal, realArray, nodeName; function reduceShadowDOM(res, child) { var replacements = axe.utils.getFlattenedTree(child, shadowId); if (replacements) { res = res.concat(replacements); } return res; } if (node.documentElement) { // document node = node.documentElement; } nodeName = node.nodeName.toLowerCase(); if (axe.utils.isShadowRoot(node)) { // generate an ID for this shadow root and overwrite the current // closure shadowId with this value so that it cascades down the tree retVal = virtualDOMfromNode(node, shadowId); shadowId = 'a' + Math.random() .toString() .substring(2); realArray = Array.from(node.shadowRoot.childNodes); retVal.children = realArray.reduce(reduceShadowDOM, []); return [retVal]; } else { if (nodeName === 'content') { realArray = Array.from(node.getDistributedNodes()); return realArray.reduce(reduceShadowDOM, []); } else if ( nodeName === 'slot' && typeof node.assignedNodes === 'function' ) { realArray = Array.from(node.assignedNodes()); if (!realArray.length) { // fallback content realArray = getSlotChildren(node); } var styl = window.getComputedStyle(node); // check the display property if (false && styl.display !== 'contents') { // intentionally commented out // has a box retVal = virtualDOMfromNode(node, shadowId); retVal.children = realArray.reduce(reduceShadowDOM, []); return [retVal]; } else { return realArray.reduce(reduceShadowDOM, []); } } else { if (node.nodeType === 1) { retVal = 
virtualDOMfromNode(node, shadowId); realArray = Array.from(node.childNodes); retVal.children = realArray.reduce(reduceShadowDOM, []); return [retVal]; } else if (node.nodeType === 3) { // text return [virtualDOMfromNode(node)]; } return undefined; } } }; /** * Recursively return a single node from a virtual dom tree * * @param {Object} vNode The flattened, virtual DOM tree * @param {Node} node The HTML DOM node */ axe.utils.getNodeFromTree = function(vNode, node) { var found; if (vNode.actualNode === node) { return vNode; } vNode.children.forEach(candidate => { var retVal; if (candidate.actualNode === node) { found = candidate; } else { retVal = axe.utils.getNodeFromTree(candidate, node); if (retVal) { found = retVal; } } }); return found; };
1
13,767
nit: can remove this `else` since we return from the condition above.
dequelabs-axe-core
js
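A standalone sketch of the simplification the nit asks for: since the first branch returns, the `else` can be dropped and the remaining conditions flattened. The mock below keeps only the traversal logic, outside the axe namespace:

    function getNodeFromTree(vNode, node) {
      if (vNode.actualNode === node) {
        return vNode;
      }
      let found;
      vNode.children.forEach(candidate => {
        if (found) {
          return; // already located; skip the remaining siblings
        }
        if (candidate.actualNode === node) {
          found = candidate;
        } else {
          found = getNodeFromTree(candidate, node);
        }
      });
      return found;
    }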
@@ -39,11 +39,6 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http _flushCompleted = OnFlushCompleted; } - public void Write(ArraySegment<byte> buffer, bool chunk = false) - { - WriteAsync(buffer, default(CancellationToken), chunk).GetAwaiter().GetResult(); - } - public Task WriteAsync(ArraySegment<byte> buffer, bool chunk = false, CancellationToken cancellationToken = default(CancellationToken)) { if (cancellationToken.IsCancellationRequested)
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Microsoft.AspNetCore.Server.Kestrel.Internal.System.IO.Pipelines; using Microsoft.Extensions.Internal; namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http { public class OutputProducer : IDisposable { private static readonly ArraySegment<byte> _emptyData = new ArraySegment<byte>(new byte[0]); private readonly string _connectionId; private readonly IKestrelTrace _log; // This locks access to to all of the below fields private readonly object _contextLock = new object(); private bool _completed = false; private readonly IPipe _pipe; // https://github.com/dotnet/corefxlab/issues/1334 // Pipelines don't support multiple awaiters on flush // this is temporary until it does private TaskCompletionSource<object> _flushTcs; private readonly object _flushLock = new object(); private Action _flushCompleted; public OutputProducer(IPipe pipe, string connectionId, IKestrelTrace log) { _pipe = pipe; _connectionId = connectionId; _log = log; _flushCompleted = OnFlushCompleted; } public void Write(ArraySegment<byte> buffer, bool chunk = false) { WriteAsync(buffer, default(CancellationToken), chunk).GetAwaiter().GetResult(); } public Task WriteAsync(ArraySegment<byte> buffer, bool chunk = false, CancellationToken cancellationToken = default(CancellationToken)) { if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } return WriteAsync(buffer, cancellationToken, chunk); } public void Flush() { WriteAsync(_emptyData, default(CancellationToken)).GetAwaiter().GetResult(); } public Task FlushAsync(CancellationToken cancellationToken = default(CancellationToken)) { return WriteAsync(_emptyData, cancellationToken); } public void Write<T>(Action<WritableBuffer, T> callback, T state) { lock (_contextLock) { if (_completed) { return; } var buffer = _pipe.Writer.Alloc(1); callback(buffer, state); buffer.Commit(); } } public void Dispose() { lock (_contextLock) { if (_completed) { return; } _log.ConnectionDisconnect(_connectionId); _completed = true; _pipe.Writer.Complete(); } } public void Abort() { lock (_contextLock) { if (_completed) { return; } _log.ConnectionDisconnect(_connectionId); _completed = true; _pipe.Reader.CancelPendingRead(); } } private Task WriteAsync( ArraySegment<byte> buffer, CancellationToken cancellationToken, bool chunk = false) { var writableBuffer = default(WritableBuffer); lock (_contextLock) { if (_completed) { return TaskCache.CompletedTask; } writableBuffer = _pipe.Writer.Alloc(1); var writer = new WritableBufferWriter(writableBuffer); if (buffer.Count > 0) { if (chunk) { ChunkWriter.WriteBeginChunkBytes(ref writer, buffer.Count); } writer.Write(buffer.Array, buffer.Offset, buffer.Count); if (chunk) { ChunkWriter.WriteEndChunkBytes(ref writer); } } writableBuffer.Commit(); } return FlushAsync(writableBuffer, cancellationToken); } private Task FlushAsync(WritableBuffer writableBuffer, CancellationToken cancellationToken) { var awaitable = writableBuffer.FlushAsync(cancellationToken); if (awaitable.IsCompleted) { // The flush task can't fail today return TaskCache.CompletedTask; } return FlushAsyncAwaited(awaitable, cancellationToken); } private async Task FlushAsyncAwaited(WritableBufferAwaitable awaitable, CancellationToken 
cancellationToken) { // https://github.com/dotnet/corefxlab/issues/1334 // Since the flush awaitable doesn't currently support multiple awaiters // we need to use a task to track the callbacks. // All awaiters get the same task lock (_flushLock) { if (_flushTcs == null || _flushTcs.Task.IsCompleted) { _flushTcs = new TaskCompletionSource<object>(); awaitable.OnCompleted(_flushCompleted); } } await _flushTcs.Task; cancellationToken.ThrowIfCancellationRequested(); } private void OnFlushCompleted() { _flushTcs.TrySetResult(null); } } }
1
13,219
You missed `Write<T>(...)`! I'm kidding ofc.
aspnet-KestrelHttpServer
.cs
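For context on the deletion above: `GetAwaiter().GetResult()` is sync-over-async, which blocks a thread until the task completes, wasting a pool thread and risking deadlock when a synchronization context is captured. A minimal sketch of the removed wrapper's shape, with invented names:

    using System.Threading.Tasks;

    class OutputSketch
    {
        // Async path: composes with callers without blocking.
        public Task WriteAsync(byte[] buffer) => Task.Delay(1);

        // Shape of the removed wrapper: blocks the calling thread until the
        // async write finishes, which is why callers are pushed to WriteAsync.
        public void Write(byte[] buffer) => WriteAsync(buffer).GetAwaiter().GetResult();
    }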
@@ -5,7 +5,7 @@ class SubscriptionUpcomingInvoiceUpdater def process @subscriptions.each do |subscription| - if subscription.stripe_customer_id + if subscription.stripe_customer_id.present? upcoming_invoice = upcoming_invoice_for(subscription.stripe_customer_id) update_next_payment_information(subscription, upcoming_invoice) end
1
class SubscriptionUpcomingInvoiceUpdater def initialize(subscriptions) @subscriptions = subscriptions end def process @subscriptions.each do |subscription| if subscription.stripe_customer_id upcoming_invoice = upcoming_invoice_for(subscription.stripe_customer_id) update_next_payment_information(subscription, upcoming_invoice) end end end private def upcoming_invoice_for(stripe_customer_id) Stripe::Invoice.upcoming(customer: stripe_customer_id) rescue Stripe::InvalidRequestError => error notify_airbrake(error) nil end def update_next_payment_information(subscription, upcoming_invoice) if upcoming_invoice update_next_payment_information_from_upcoming_invoice(subscription, upcoming_invoice) else clear_next_payment_information(subscription) end end def update_next_payment_information_from_upcoming_invoice(subscription, upcoming_invoice) subscription.update!( next_payment_amount: upcoming_invoice.total, next_payment_on: Time.zone.at(upcoming_invoice.period_end) ) end def clear_next_payment_information(subscription) subscription.update!( next_payment_amount: 0, next_payment_on: nil ) end def notify_airbrake(error) unless error_is_because_user_has_no_upcoming_invoice?(error) Airbrake.notify(error) end end def error_is_because_user_has_no_upcoming_invoice?(error) error.http_status == 404 end end
1
12,920
Was there a customer who had this set to an empty string?
thoughtbot-upcase
rb
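Why `.present?` matters for the question above: an empty string is truthy in Ruby, so the pre-patch `if` would proceed to call Stripe with a blank customer id, while ActiveSupport's `present?` is false for both `nil` and `""`. A standalone sketch:

    require "active_support/core_ext/object/blank"

    stripe_customer_id = ""
    puts "truthy"  if stripe_customer_id           # prints: "" is truthy in Ruby
    puts "present" if stripe_customer_id.present?  # prints nothing: "" is blank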
@@ -13,6 +13,14 @@ module Ncr approver_email_address(final_approver) end + def status_aware_approver_email_address + if proposal.approved? + final_approver_email_address + else + current_approver_email_address + end + end + private def approver_email_address(approver)
1
module Ncr class WorkOrderDecorator < Draper::Decorator delegate_all EMERGENCY_APPROVER_EMAIL = 'Emergency - Verbal Approval' NO_APPROVER_FOUND = 'No Approver Found' def current_approver_email_address approver_email_address(current_approver) end def final_approver_email_address approver_email_address(final_approver) end private def approver_email_address(approver) if approver approver.email_address elsif emergency EMERGENCY_APPROVER_EMAIL else NO_APPROVER_FOUND end end end end
1
14,838
I know we have the `reporter` spec below, but what about a unit test for this to explain the reasoning behind the logic? If I were going to update this decorator, I would assume it wasn't covered by tests, because there is no unit test.
18F-C2
rb
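A sketch of the kind of unit spec the comment asks for. The Struct below is a standalone stand-in for the branching logic in `Ncr::WorkOrderDecorator`, and the email addresses are hypothetical; a real spec would build the decorator and stub `final_approver_email_address`/`current_approver_email_address` instead:

    require "rspec/autorun"

    StatusAware = Struct.new(:proposal, :final_addr, :current_addr) do
      def status_aware_approver_email_address
        proposal.approved? ? final_addr : current_addr
      end
    end

    RSpec.describe "status_aware_approver_email_address" do
      it "returns the final approver's address once the proposal is approved" do
        decorator = StatusAware.new(double(approved?: true), "final@example.gov", "current@example.gov")
        expect(decorator.status_aware_approver_email_address).to eq("final@example.gov")
      end

      it "returns the current approver's address while the proposal is pending" do
        decorator = StatusAware.new(double(approved?: false), "final@example.gov", "current@example.gov")
        expect(decorator.status_aware_approver_email_address).to eq("current@example.gov")
      end
    end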
@@ -34,4 +34,6 @@ public interface RestClientRequest { void addForm(String name, Object value); Buffer getBodyBuffer() throws Exception; + + void attach(String name, String filename); }
1
/* * Copyright 2017 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.servicecomb.common.rest.codec; import io.vertx.core.buffer.Buffer; /** * vertx的HttpClientRequest没有getHeader的能力 * 在写cookie参数时,没办法多次添加cookie,所以只能进行接口包装 */ public interface RestClientRequest { void write(Buffer bodyBuffer); void end() throws Exception; void addCookie(String name, String value); void putHeader(String name, String value); void addForm(String name, Object value); Buffer getBodyBuffer() throws Exception; }
1
7,632
It would be better as: `void attach(String name, Part part);`
apache-servicecomb-java-chassis
java
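A sketch of the suggested signature, assuming `Part` refers to `javax.servlet.http.Part` (the servlet-api dependency is an assumption); a `Part` already carries the submitted filename, content type, and stream, so the separate `String filename` parameter becomes unnecessary:

    import javax.servlet.http.Part;

    // Interface name suffixed to avoid clashing with the real one.
    public interface RestClientRequestSketch {
      // upload an attachment; filename and content type come from the Part itself
      void attach(String name, Part part);
    }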
@@ -77,7 +77,8 @@ public abstract class BasePageIterator { protected abstract void initDefinitionLevelsReader(DataPageV1 dataPageV1, ColumnDescriptor descriptor, ByteBufferInputStream in, int count) throws IOException; - protected abstract void initDefinitionLevelsReader(DataPageV2 dataPageV2, ColumnDescriptor descriptor); + protected abstract void initDefinitionLevelsReader(DataPageV2 dataPageV2, ColumnDescriptor descriptor) + throws IOException; public int currentPageCount() { return triplesCount;
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.parquet; import java.io.IOException; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.parquet.bytes.ByteBufferInputStream; import org.apache.parquet.bytes.BytesInput; import org.apache.parquet.bytes.BytesUtils; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Dictionary; import org.apache.parquet.column.Encoding; import org.apache.parquet.column.ValuesType; import org.apache.parquet.column.page.DataPage; import org.apache.parquet.column.page.DataPageV1; import org.apache.parquet.column.page.DataPageV2; import org.apache.parquet.column.values.ValuesReader; import org.apache.parquet.column.values.rle.RunLengthBitPackingHybridDecoder; import org.apache.parquet.io.ParquetDecodingException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @SuppressWarnings("checkstyle:VisibilityModifier") public abstract class BasePageIterator { private static final Logger LOG = LoggerFactory.getLogger(BasePageIterator.class); protected final ColumnDescriptor desc; protected final String writerVersion; // iterator state protected boolean hasNext = false; protected int triplesRead = 0; protected int currentDL = 0; protected int currentRL = 0; // page bookkeeping protected Dictionary dictionary = null; protected DataPage page = null; protected int triplesCount = 0; protected Encoding valueEncoding = null; protected IntIterator definitionLevels = null; protected IntIterator repetitionLevels = null; protected ValuesReader values = null; protected BasePageIterator(ColumnDescriptor descriptor, String writerVersion) { this.desc = descriptor; this.writerVersion = writerVersion; } protected void reset() { this.page = null; this.triplesCount = 0; this.triplesRead = 0; this.repetitionLevels = null; this.hasNext = false; } protected abstract void initDataReader(Encoding dataEncoding, ByteBufferInputStream in, int valueCount); protected abstract void initDefinitionLevelsReader(DataPageV1 dataPageV1, ColumnDescriptor descriptor, ByteBufferInputStream in, int count) throws IOException; protected abstract void initDefinitionLevelsReader(DataPageV2 dataPageV2, ColumnDescriptor descriptor); public int currentPageCount() { return triplesCount; } public boolean hasNext() { return hasNext; } public void setPage(DataPage page) { Preconditions.checkNotNull(page, "Cannot read from null page"); this.page = page; this.page.accept(new DataPage.Visitor<ValuesReader>() { @Override public ValuesReader visit(DataPageV1 dataPageV1) { initFromPage(dataPageV1); return null; } @Override public ValuesReader visit(DataPageV2 dataPageV2) { initFromPage(dataPageV2); return null; } }); this.triplesRead = 0; this.hasNext = triplesRead < triplesCount; } protected void 
initFromPage(DataPageV1 initPage) { this.triplesCount = initPage.getValueCount(); ValuesReader rlReader = initPage.getRlEncoding().getValuesReader(desc, ValuesType.REPETITION_LEVEL); this.repetitionLevels = new ValuesReaderIntIterator(rlReader); try { BytesInput bytes = initPage.getBytes(); LOG.debug("page size {} bytes and {} records", bytes.size(), triplesCount); LOG.debug("reading repetition levels at 0"); ByteBufferInputStream in = bytes.toInputStream(); rlReader.initFromPage(triplesCount, in); LOG.debug("reading definition levels at {}", in.position()); initDefinitionLevelsReader(initPage, desc, in, triplesCount); LOG.debug("reading data at {}", in.position()); initDataReader(initPage.getValueEncoding(), in, initPage.getValueCount()); } catch (IOException e) { throw new ParquetDecodingException("could not read page " + initPage + " in col " + desc, e); } } protected void initFromPage(DataPageV2 initPage) { this.triplesCount = initPage.getValueCount(); this.repetitionLevels = newRLEIterator(desc.getMaxRepetitionLevel(), initPage.getRepetitionLevels()); try { initDefinitionLevelsReader(initPage, desc); LOG.debug("page data size {} bytes and {} records", initPage.getData().size(), triplesCount); initDataReader(initPage.getDataEncoding(), initPage.getData().toInputStream(), triplesCount); } catch (IOException e) { throw new ParquetDecodingException("could not read page " + initPage + " in col " + desc, e); } } public void setDictionary(Dictionary dict) { this.dictionary = dict; } protected abstract static class IntIterator { abstract int nextInt(); } static class ValuesReaderIntIterator extends IntIterator { private final ValuesReader delegate; ValuesReaderIntIterator(ValuesReader delegate) { this.delegate = delegate; } @Override int nextInt() { return delegate.readInteger(); } } IntIterator newRLEIterator(int maxLevel, BytesInput bytes) { try { if (maxLevel == 0) { return new PageIterator.NullIntIterator(); } return new RLEIntIterator( new RunLengthBitPackingHybridDecoder( BytesUtils.getWidthFromMaxInt(maxLevel), bytes.toInputStream())); } catch (IOException e) { throw new ParquetDecodingException("could not read levels in page for col " + desc, e); } } static class RLEIntIterator extends IntIterator { private final RunLengthBitPackingHybridDecoder delegate; RLEIntIterator(RunLengthBitPackingHybridDecoder delegate) { this.delegate = delegate; } @Override int nextInt() { try { return delegate.readInt(); } catch (IOException e) { throw new ParquetDecodingException(e); } } } static final class NullIntIterator extends IntIterator { @Override int nextInt() { return 0; } } }
1
38,669
I didn't see where the IOException can get thrown. Is this just to match the V1 reader?
apache-iceberg
java
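One reason the broader `throws` clause is harmless even if no current caller throws: a Java override may declare fewer checked exceptions than the abstract method, so existing implementations that never do I/O still compile, while new ones gain the option. A minimal sketch with invented class names:

    import java.io.IOException;

    abstract class PageSketch {
      protected abstract void initDefinitionLevelsReader(int page) throws IOException;
    }

    class NonThrowingReader extends PageSketch {
      @Override // narrowing the throws clause is allowed
      protected void initDefinitionLevelsReader(int page) { /* no I/O needed */ }
    }

    class ThrowingReader extends PageSketch {
      @Override
      protected void initDefinitionLevelsReader(int page) throws IOException {
        throw new IOException("decoding failure surfaces as a checked exception");
      }
    }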
@@ -418,7 +418,7 @@ unsigned int compute2DCoordsMimicDistMat( RDKit::ROMol &mol, const DOUBLE_SMART_PTR *dmat, bool canonOrient, bool clearConfs, double weightDistMat, unsigned int nFlipsPerSample, unsigned int nSamples, int sampleSeed, bool permuteDeg4Nodes, - bool forceRDKit) { + bool /* forceRDKit */) { // storage for pieces of a molecule/s that are embedded in 2D std::list<EmbeddedFrag> efrags; computeInitialCoords(mol, nullptr, efrags);
1
// // Copyright (C) 2003-2017 Greg Landrum and Rational Discovery LLC // // @@ All Rights Reserved @@ // This file is part of the RDKit. // The contents are covered by the terms of the BSD license // which is included in the file license.txt, found at the root // of the RDKit source tree. // #include "RDDepictor.h" #include "EmbeddedFrag.h" #ifdef RDK_BUILD_COORDGEN_SUPPORT #include <CoordGen/CoordGen.h> #endif #include <RDGeneral/types.h> #include <GraphMol/ROMol.h> #include <GraphMol/Conformer.h> #include <math.h> #include <GraphMol/MolOps.h> #include <GraphMol/Rings.h> #include <Geometry/point.h> #include <Geometry/Transform2D.h> #include <GraphMol/Substruct/SubstructMatch.h> #include "EmbeddedFrag.h" #include "DepictUtils.h" #include <iostream> #include <boost/dynamic_bitset.hpp> #include <algorithm> namespace RDDepict { #ifdef RDK_BUILD_COORDGEN_SUPPORT bool preferCoordGen = false; #endif namespace DepictorLocal { // arings: indices of atoms in rings void embedFusedSystems(const RDKit::ROMol &mol, const RDKit::VECT_INT_VECT &arings, std::list<EmbeddedFrag> &efrags) { RDKit::INT_INT_VECT_MAP neighMap; RingUtils::makeRingNeighborMap(arings, neighMap); RDKit::INT_VECT fused; size_t cnrs = arings.size(); boost::dynamic_bitset<> fusDone(cnrs); size_t curr = 0; while (curr < cnrs) { // embed all ring and fused ring systems fused.resize(0); RingUtils::pickFusedRings(curr, neighMap, fused, fusDone); RDKit::VECT_INT_VECT frings; frings.reserve(fused.size()); for (RDKit::INT_VECT_CI rid = fused.begin(); rid != fused.end(); ++rid) { frings.push_back(arings[*rid]); } EmbeddedFrag efrag(&mol, frings); efrag.setupNewNeighs(); efrags.push_back(efrag); size_t rix; for (rix = 0; rix < cnrs; ++rix) { if (!fusDone[rix]) { curr = rix; break; } } if (rix == cnrs) { break; } } } void embedCisTransSystems(const RDKit::ROMol &mol, std::list<EmbeddedFrag> &efrags) { for (RDKit::ROMol::ConstBondIterator cbi = mol.beginBonds(); cbi != mol.endBonds(); ++cbi) { // check if this bond is in a cis/trans double bond // and it is not a ring bond if (((*cbi)->getBondType() == RDKit::Bond::DOUBLE) // this is a double bond && ((*cbi)->getStereo() > RDKit::Bond::STEREOANY) // and has stereo chemistry specified && (!(*cbi)->getOwningMol().getRingInfo()->numBondRings( (*cbi)->getIdx()))) { // not in a ring if ((*cbi)->getStereoAtoms().size() != 2) { BOOST_LOG(rdWarningLog) << "WARNING: bond found with stereo spec but no stereo atoms" << std::endl; continue; } EmbeddedFrag efrag(*cbi); efrag.setupNewNeighs(); efrags.push_back(efrag); } } } RDKit::INT_LIST getNonEmbeddedAtoms(const RDKit::ROMol &mol, const std::list<EmbeddedFrag> &efrags) { RDKit::INT_LIST res; boost::dynamic_bitset<> done(mol.getNumAtoms()); for (const auto &efrag : efrags) { const INT_EATOM_MAP &oatoms = efrag.GetEmbeddedAtoms(); for (const auto &oatom : oatoms) { done[oatom.first] = 1; } } for (RDKit::ROMol::ConstAtomIterator ai = mol.beginAtoms(); ai != mol.endAtoms(); ai++) { int aid = (*ai)->getIdx(); if (!done[aid]) { res.push_back(aid); } } return res; } // find the largest fragments that is not done yet ( // i.e. 
merged with the master fragments) // if do not find anything we return efrags.end() std::list<EmbeddedFrag>::iterator _findLargestFrag( std::list<EmbeddedFrag> &efrags) { std::list<EmbeddedFrag>::iterator mfri; int msiz = 0; for (auto efri = efrags.begin(); efri != efrags.end(); efri++) { if ((!efri->isDone()) && (efri->Size() > msiz)) { msiz = efri->Size(); mfri = efri; } } if (msiz == 0) { mfri = efrags.end(); } return mfri; } void _shiftCoords(std::list<EmbeddedFrag> &efrags) { // shift the coordinates if there are multiple fragments // so that the fragments do not overlap each other if (efrags.empty()) { return; } for (auto &efrag : efrags) { efrag.computeBox(); } auto eri = efrags.begin(); double xmax = eri->getBoxPx(); double xmin = eri->getBoxNx(); double ymax = eri->getBoxPy(); double ymin = eri->getBoxNy(); ++eri; while (eri != efrags.end()) { bool xshift = true; if (xmax + xmin > ymax + ymin) { xshift = false; } double xn = eri->getBoxNx(); double xp = eri->getBoxPx(); double yn = eri->getBoxNy(); double yp = eri->getBoxPy(); RDGeom::Point2D shift(0.0, 0.0); if (xshift) { shift.x = xmax + xn + 1.0; shift.y = 0.0; xmax += xp + xn + 1.0; } else { shift.x = 0.0; shift.y = ymax + yn + 1.0; ymax += yp + yn + 1.0; } eri->Translate(shift); ++eri; } } } void computeInitialCoords(RDKit::ROMol &mol, const RDGeom::INT_POINT2D_MAP *coordMap, std::list<EmbeddedFrag> &efrags) { RDKit::INT_VECT atomRanks; atomRanks.resize(mol.getNumAtoms()); for (unsigned int i = 0; i < mol.getNumAtoms(); ++i) { atomRanks[i] = getAtomDepictRank(mol.getAtomWithIdx(i)); } RDKit::VECT_INT_VECT arings; // first find all the rings RDKit::MolOps::symmetrizeSSSR(mol, arings); // do stereochemistry RDKit::MolOps::assignStereochemistry(mol, false); efrags.clear(); // user specfied coordinates exist bool preSpec = false; // first embed any atoms for which the coordinates have been specified. if ((coordMap) && (coordMap->size() > 1)) { EmbeddedFrag efrag(&mol, *coordMap); // add this to the list of embedded fragments efrags.push_back(efrag); preSpec = true; } if (arings.size() > 0) { // first deal with the fused rings DepictorLocal::embedFusedSystems(mol, arings, efrags); } // deal with any cis/trans systems DepictorLocal::embedCisTransSystems(mol, efrags); // now get the atoms that are not yet embedded in either a cis/trans system // or a ring system (or simply the first atom) RDKit::INT_LIST nratms = DepictorLocal::getNonEmbeddedAtoms(mol, efrags); std::list<EmbeddedFrag>::iterator mri; if (preSpec) { // if the user specified coordinates on some of the atoms use that as // as the starting fragment and it should be at the beginning of the vector mri = efrags.begin(); } else { // otherwise - find the largest fragment that was embedded mri = DepictorLocal::_findLargestFrag(efrags); } while ((mri != efrags.end()) || (nratms.size() > 0)) { if (mri == efrags.end()) { // we are out of embedded fragments, if there are any // non embedded atoms use them to start a fragment int mrank, rank; mrank = static_cast<int>(RDKit::MAX_INT); RDKit::INT_LIST_I nri, mnri; for (nri = nratms.begin(); nri != nratms.end(); nri++) { rank = atomRanks[*nri]; rank *= mol.getNumAtoms(); // use the atom index as well so that we at least // get reproduceable depictions in cases where things // have identical ranks. 
        rank += *nri;
        if (rank < mrank) {
          mrank = rank;
          mnri = nri;
        }
      }
      EmbeddedFrag efrag((*mnri), &mol);
      nratms.erase(mnri);
      efrags.push_back(efrag);
      mri = efrags.end();
      mri--;
    }
    mri->markDone();
    mri->expandEfrag(nratms, efrags);
    mri = DepictorLocal::_findLargestFrag(efrags);
  }
  // at this point any remaining efrags should belong to individual fragments
  // in the molecule
}

unsigned int copyCoordinate(RDKit::ROMol &mol, std::list<EmbeddedFrag> &efrags,
                            bool clearConfs) {
  // create a conformation to store the coordinates and add it to the molecule
  auto *conf = new RDKit::Conformer(mol.getNumAtoms());
  conf->set3D(false);
  std::list<EmbeddedFrag>::iterator eri;
  for (eri = efrags.begin(); eri != efrags.end(); eri++) {
    const INT_EATOM_MAP &eatoms = eri->GetEmbeddedAtoms();
    INT_EATOM_MAP_CI eai;
    for (eai = eatoms.begin(); eai != eatoms.end(); eai++) {
      int aid = eai->first;
      RDGeom::Point2D cr = eai->second.loc;
      RDGeom::Point3D fcr(cr.x, cr.y, 0.0);
      conf->setAtomPos(aid, fcr);
    }
  }
  unsigned int confId = 0;
  if (clearConfs) {
    // clear all the conformations on the molecule and assign conf ID 0 to
    // this conformation
    mol.clearConformers();
    conf->setId(confId);
    // conf ID has already been set in this case to 0 - no other
    // confs on the molecule at this point
    mol.addConformer(conf);
  } else {
    // let addConformer assign a conformation ID for the conformation
    confId = mol.addConformer(conf, true);
  }
  return confId;
}

//
//
// 50,000 foot algorithm:
//   1) Find rings
//   2) Find fused systems
//   3) embed largest fused system
//   4) foreach unfinished atom:
//      1) find neighbors
//      2) if neighbor is non-ring atom, embed it; otherwise merge the
//         ring system
//      3) add all atoms just merged/embedded to unfinished atom list
//
//
unsigned int compute2DCoords(RDKit::ROMol &mol,
                             const RDGeom::INT_POINT2D_MAP *coordMap,
                             bool canonOrient, bool clearConfs,
                             unsigned int nFlipsPerSample,
                             unsigned int nSamples, int sampleSeed,
                             bool permuteDeg4Nodes, bool forceRDKit) {
#ifdef RDK_BUILD_COORDGEN_SUPPORT
  // default to use CoordGen if we have it installed
  if (!forceRDKit && preferCoordGen) {
    RDKit::CoordGen::CoordGenParams params;
    if (coordMap) params.coordMap = *coordMap;
    return RDKit::CoordGen::addCoords(mol, &params);
  }
#endif
  // storage for pieces of a molecule/s that are embedded in 2D
  std::list<EmbeddedFrag> efrags;
  computeInitialCoords(mol, coordMap, efrags);

  std::list<EmbeddedFrag>::iterator eri;
  // perform random sampling here to improve the density
  for (eri = efrags.begin(); eri != efrags.end(); eri++) {
    // either sample the 2D space by randomly flipping rotatable
    // bonds in the structure or flip only bonds along the shortest
    // path between colliding atoms - don't do both
    if ((nSamples > 0) && (nFlipsPerSample > 0)) {
      eri->randomSampleFlipsAndPermutations(nFlipsPerSample, nSamples,
                                            sampleSeed, nullptr, 0.0,
                                            permuteDeg4Nodes);
    } else {
      eri->removeCollisionsBondFlip();
    }
  }
  for (eri = efrags.begin(); eri != efrags.end(); eri++) {
    // if there are any remaining collisions
    eri->removeCollisionsOpenAngles();
    eri->removeCollisionsShortenBonds();
  }
  if (!coordMap || !coordMap->size()) {
    if (canonOrient && efrags.size()) {
      // if we do not have any prespecified coordinates - canonicalize
      // the orientation of the fragment so that the longest axes fall
      // along the x-axis etc.
      for (eri = efrags.begin(); eri != efrags.end(); eri++) {
        eri->canonicalizeOrientation();
      }
    }
  }
  DepictorLocal::_shiftCoords(efrags);
  // create a conformation on the molecule and copy the coordinates
  unsigned int cid = copyCoordinate(mol, efrags, clearConfs);

  // special case for a single-atom coordMap template
  if ((coordMap) && (coordMap->size() == 1)) {
    RDKit::Conformer &conf = mol.getConformer(cid);
    auto cRef = coordMap->begin();
    RDGeom::Point3D confPos = conf.getAtomPos(cRef->first);
    RDGeom::Point2D refPos = cRef->second;
    refPos.x -= confPos.x;
    refPos.y -= confPos.y;
    for (unsigned int i = 0; i < conf.getNumAtoms(); ++i) {
      confPos = conf.getAtomPos(i);
      confPos.x += refPos.x;
      confPos.y += refPos.y;
      conf.setAtomPos(i, confPos);
    }
  }
  return cid;
}

//! \brief Compute the 2D coordinates such that the interatom distances
//! mimic those in a distance matrix
/*!
  This function generates 2D coordinates such that the inter-atom distances
  mimic those specified via dmat. This is done by randomly sampling (flipping)
  the rotatable bonds in the molecule and evaluating a cost function which
  contains two components. The first component is the sum of the inverses of
  the squared inter-atom distances; this helps in spreading the atoms far from
  each other. The second component is the sum of squares of the differences in
  distance between those in dmat and the generated structure. The user can
  adjust the relative importance of the two components via an adjustable
  parameter (see below)

  ARGUMENTS:

  \param mol - molecule involved in the fragment
  \param dmat - the distance matrix we want to mimic; this is a symmetric
                N by N matrix where N is the number of atoms in mol. All
                negative entries in dmat are ignored.
  \param canonOrient - canonicalize the orientation after the 2D embedding
                       is done
  \param clearConfs - clear any previously existing conformations on mol
                      before adding a conformation
  \param weightDistMat - a value between 0.0 and 1.0; this determines the
                         importance of mimicking the inter-atom distances in
                         dmat. (1.0 - weightDistMat) is the weight associated
                         with spreading out the structure (density) in the
                         cost function
  \param nFlipsPerSample - the number of rotatable bonds that are randomly
                           flipped for each sample
  \param nSamples - the number of samples
  \param sampleSeed - seed for the random sampling process
*/
unsigned int compute2DCoordsMimicDistMat(
    RDKit::ROMol &mol, const DOUBLE_SMART_PTR *dmat, bool canonOrient,
    bool clearConfs, double weightDistMat, unsigned int nFlipsPerSample,
    unsigned int nSamples, int sampleSeed, bool permuteDeg4Nodes,
    bool forceRDKit) {
  // storage for pieces of a molecule/s that are embedded in 2D
  std::list<EmbeddedFrag> efrags;
  computeInitialCoords(mol, nullptr, efrags);

  // now perform random flips of rotatable bonds so that we can sample the
  // space and try to mimic the distances in dmat
  std::list<EmbeddedFrag>::iterator eri;
  for (eri = efrags.begin(); eri != efrags.end(); eri++) {
    eri->randomSampleFlipsAndPermutations(nFlipsPerSample, nSamples,
                                          sampleSeed, dmat, weightDistMat,
                                          permuteDeg4Nodes);
  }
  if (canonOrient && efrags.size()) {
    // canonicalize the orientation of the fragment so that the
    // longest axes fall along the x-axis etc.
    for (eri = efrags.begin(); eri != efrags.end(); eri++) {
      eri->canonicalizeOrientation();
    }
  }
  DepictorLocal::_shiftCoords(efrags);
  // create a conformation on the molecule and copy the coordinates
  unsigned int cid = copyCoordinate(mol, efrags, clearConfs);
  return cid;
}

//! \brief Compute 2D coordinates where a piece of the molecule is
//  constrained to have the same coordinates as a reference.
void generateDepictionMatching2DStructure(RDKit::ROMol &mol,
                                          const RDKit::ROMol &reference,
                                          int confId,
                                          RDKit::ROMol *referencePattern,
                                          bool acceptFailure,
                                          bool forceRDKit) {
  std::vector<int> refMatch;
  RDKit::MatchVectType matchVect;
  if (referencePattern) {
    if (reference.getNumAtoms(true) != referencePattern->getNumAtoms(true)) {
      throw RDDepict::DepictException(
          "When a pattern is provided, it must have the same number of atoms "
          "as the reference");
    }
    RDKit::MatchVectType refMatchVect;
    RDKit::SubstructMatch(reference, *referencePattern, refMatchVect);
    if (refMatchVect.empty()) {
      throw RDDepict::DepictException(
          "Reference pattern does not map to reference.");
    }
    refMatch.reserve(refMatchVect.size());
    for (auto &i : refMatchVect) {
      refMatch.push_back(i.second);
    }
    RDKit::SubstructMatch(mol, *referencePattern, matchVect);
  } else {
    refMatch.reserve(reference.getNumAtoms(true));
    for (unsigned int i = 0; i < reference.getNumAtoms(true); ++i) {
      refMatch.push_back(i);
    }
    RDKit::SubstructMatch(mol, reference, matchVect);
  }

  RDGeom::INT_POINT2D_MAP coordMap;
  if (matchVect.empty()) {
    if (!acceptFailure) {
      throw RDDepict::DepictException(
          "Substructure match with reference not found.");
    }
  } else {
    const RDKit::Conformer &conf = reference.getConformer(confId);
    for (RDKit::MatchVectType::const_iterator mv = matchVect.begin();
         mv != matchVect.end(); ++mv) {
      RDGeom::Point3D pt3 = conf.getAtomPos(refMatch[mv->first]);
      RDGeom::Point2D pt2(pt3.x, pt3.y);
      coordMap[mv->second] = pt2;
    }
  }
  RDDepict::compute2DCoords(mol, &coordMap, false /* canonOrient */,
                            true /* clearConfs */, 0, 0, 0, false,
                            forceRDKit);
}

//! \brief Generate a 2D depiction for a molecule where all or part of
//  it mimics the coordinates of a 3D reference structure.
void generateDepictionMatching3DStructure(RDKit::ROMol &mol,
                                          const RDKit::ROMol &reference,
                                          int confId,
                                          RDKit::ROMol *referencePattern,
                                          bool acceptFailure,
                                          bool forceRDKit) {
  unsigned int num_ats = mol.getNumAtoms();
  if (!referencePattern && reference.getNumAtoms() < num_ats) {
    if (acceptFailure) {
      RDDepict::compute2DCoords(mol);
      return;
    } else {
      throw RDDepict::DepictException(
          "Reference molecule not compatible with target molecule.");
    }
  }

  std::vector<int> mol_to_ref(num_ats, -1);
  if (referencePattern && referencePattern->getNumAtoms()) {
    RDKit::MatchVectType molMatchVect, refMatchVect;
    RDKit::SubstructMatch(mol, *referencePattern, molMatchVect);
    RDKit::SubstructMatch(reference, *referencePattern, refMatchVect);
    if (molMatchVect.empty() || refMatchVect.empty()) {
      if (acceptFailure) {
        RDDepict::compute2DCoords(mol);
        return;
      } else {
        throw RDDepict::DepictException(
            "Reference pattern didn't match molecule or reference.");
      }
    }
    for (size_t i = 0; i < molMatchVect.size(); ++i) {
      mol_to_ref[molMatchVect[i].second] = refMatchVect[i].second;
    }
  } else {
    for (unsigned int i = 0; i < num_ats; ++i) {
      mol_to_ref[i] = i;
    }
  }

  const RDKit::Conformer &conf = reference.getConformer(confId);
  // the distance matrix is a triangular representation
  RDDepict::DOUBLE_SMART_PTR dmat(new double[num_ats * (num_ats - 1) / 2]);
  // negative distances are ignored, so initialise to -1.0 so that the subset
  // selected by referencePattern works.
  std::fill(dmat.get(), dmat.get() + num_ats * (num_ats - 1) / 2, -1.0);
  for (unsigned int i = 0; i < num_ats; ++i) {
    if (-1 == mol_to_ref[i]) {
      continue;
    }
    RDGeom::Point3D cds_i = conf.getAtomPos(mol_to_ref[i]);
    for (unsigned int j = i + 1; j < num_ats; ++j) {
      if (-1 == mol_to_ref[j]) {
        continue;
      }
      RDGeom::Point3D cds_j = conf.getAtomPos(mol_to_ref[j]);
      dmat[(j * (j - 1) / 2) + i] = (cds_i - cds_j).length();
    }
  }

  RDDepict::compute2DCoordsMimicDistMat(mol, &dmat, false, true, 0.5, 3, 100,
                                        25, true, forceRDKit);
}
}
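The docstring above describes the sampling cost only in prose. Written out, with w = weightDistMat, d_ij the inter-atom distances of a sampled layout, and D_ij the entries of dmat, it amounts to something of the form

    E = (1 - w) \sum_{i<j} d_{ij}^{-2} + w \sum_{i<j,\; D_{ij} \ge 0} (d_{ij} - D_{ij})^{2}

This is a reading of the documentation, not a formula quoted from the implementation, so the library may scale or normalize the terms differently. Negative D_ij entries are skipped, which is what lets the 3D-matching code above leave unmapped atom pairs at -1.0.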
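generateDepictionMatching3DStructure fills dmat as a flat lower-triangular array: the unordered pair (i, j) with i < j lands at index j*(j-1)/2 + i, so num_ats*(num_ats-1)/2 doubles hold every pair exactly once. A standalone sketch of that layout (the helper name and sizes here are illustrative, not from the source):

#include <cassert>
#include <cstddef>

// Index of the unordered pair (i, j), i < j, in a flat triangular array.
inline std::size_t pairIndex(std::size_t i, std::size_t j) {
  return j * (j - 1) / 2 + i;
}

int main() {
  const std::size_t n = 4;                          // 4 atoms -> n*(n-1)/2 == 6 pairs
  assert(pairIndex(0, 1) == 0);                     // first slot
  assert(pairIndex(1, 3) == 4);                     // 3*2/2 + 1
  assert(pairIndex(2, 3) == n * (n - 1) / 2 - 1);   // last slot
  return 0;
}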
1
18527
We normally suppress this warning with `RDUSED_PARAM(forceRDKit)`
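The macro the reviewer points at is not shown in this dump (RDKit's own helper appears to be spelled RDUNUSED_PARAM), and such helpers conventionally expand to a void-cast. A minimal sketch of the idiom, with the definition assumed rather than quoted from RDKit:

// Assumed definition; the real macro may differ in spelling and expansion.
#define RDUSED_PARAM(x) ((void)(x))

// Toy stand-in for compute2DCoordsMimicDistMat, whose forceRDKit parameter
// is only consulted in CoordGen-enabled builds:
unsigned int mimicDistMatStub(bool forceRDKit) {
  RDUSED_PARAM(forceRDKit);  // no-op; silences -Wunused-parameter
  return 0;
}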
rdkit-rdkit
cpp
@@ -473,12 +473,11 @@ Player* Game::getPlayerByGUID(const uint32_t& guid) return nullptr; } - for (const auto& it : players) { - if (guid == it.second->getGUID()) { - return it.second; - } + auto it = mappedPlayerGuids.find(guid); + if (it == mappedPlayerGuids.end()) { + return nullptr; } - return nullptr; + return it->second; } ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player)
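This hunk replaces a linear scan over every online player with a lookup in mappedPlayerGuids, taking getPlayerByGUID from O(n) to a map lookup. That is only correct if the container is maintained on login and logout, which happens outside this hunk; a sketch of the wiring it presumes (member and hook names below are illustrative, not from the diff):

#include <cstdint>
#include <map>

struct Player {
    uint32_t guid;
    uint32_t getGUID() const { return guid; }
};

class Game {
    std::map<uint32_t, Player*> mappedPlayerGuids;  // guid -> online player

public:
    void onPlayerLogin(Player* p) { mappedPlayerGuids[p->getGUID()] = p; }    // assumed hook
    void onPlayerLogout(Player* p) { mappedPlayerGuids.erase(p->getGUID()); } // assumed hook

    Player* getPlayerByGUID(uint32_t guid) {
        auto it = mappedPlayerGuids.find(guid);
        return it == mappedPlayerGuids.end() ? nullptr : it->second;
    }
};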
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2018 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "pugicast.h" #include "actions.h" #include "bed.h" #include "configmanager.h" #include "creature.h" #include "creatureevent.h" #include "databasetasks.h" #include "events.h" #include "game.h" #include "globalevent.h" #include "iologindata.h" #include "iomarket.h" #include "items.h" #include "monster.h" #include "movement.h" #include "scheduler.h" #include "server.h" #include "spells.h" #include "talkaction.h" #include "weapons.h" extern ConfigManager g_config; extern Actions* g_actions; extern Chat* g_chat; extern TalkActions* g_talkActions; extern Spells* g_spells; extern Vocations g_vocations; extern GlobalEvents* g_globalEvents; extern CreatureEvents* g_creatureEvents; extern Events* g_events; extern CreatureEvents* g_creatureEvents; extern Monsters g_monsters; extern MoveEvents* g_moveEvents; extern Weapons* g_weapons; Game::Game() { offlineTrainingWindow.choices.emplace_back("Sword Fighting and Shielding", SKILL_SWORD); offlineTrainingWindow.choices.emplace_back("Axe Fighting and Shielding", SKILL_AXE); offlineTrainingWindow.choices.emplace_back("Club Fighting and Shielding", SKILL_CLUB); offlineTrainingWindow.choices.emplace_back("Distance Fighting and Shielding", SKILL_DISTANCE); offlineTrainingWindow.choices.emplace_back("Magic Level and Shielding", SKILL_MAGLEVEL); offlineTrainingWindow.buttons.emplace_back("Okay", 1); offlineTrainingWindow.buttons.emplace_back("Cancel", 0); offlineTrainingWindow.defaultEnterButton = 1; offlineTrainingWindow.defaultEscapeButton = 0; offlineTrainingWindow.priority = true; } Game::~Game() { for (const auto& it : guilds) { delete it.second; } } void Game::start(ServiceManager* manager) { serviceManager = manager; g_scheduler.addEvent(createSchedulerTask(EVENT_LIGHTINTERVAL, std::bind(&Game::checkLight, this))); g_scheduler.addEvent(createSchedulerTask(EVENT_CREATURE_THINK_INTERVAL, std::bind(&Game::checkCreatures, this, 0))); g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this))); } GameState_t Game::getGameState() const { return gameState; } void Game::setWorldType(WorldType_t type) { worldType = type; } void Game::setGameState(GameState_t newState) { if (gameState == GAME_STATE_SHUTDOWN) { return; //this cannot be stopped } if (gameState == newState) { return; } gameState = newState; switch (newState) { case GAME_STATE_INIT: { loadExperienceStages(); groups.load(); g_chat->load(); map.spawns.startup(); raids.loadFromXml(); raids.startup(); quests.loadFromXml(); mounts.loadFromXml(); loadMotdNum(); loadPlayersRecord(); g_globalEvents->startup(); break; } case GAME_STATE_SHUTDOWN: { g_globalEvents->execute(GLOBALEVENT_SHUTDOWN); //kick all players that are 
still online auto it = players.begin(); while (it != players.end()) { it->second->kickPlayer(true); it = players.begin(); } saveMotdNum(); saveGameState(); g_dispatcher.addTask( createTask(std::bind(&Game::shutdown, this))); g_scheduler.stop(); g_databaseTasks.stop(); g_dispatcher.stop(); break; } case GAME_STATE_CLOSED: { /* kick all players without the CanAlwaysLogin flag */ auto it = players.begin(); while (it != players.end()) { if (!it->second->hasFlag(PlayerFlag_CanAlwaysLogin)) { it->second->kickPlayer(true); it = players.begin(); } else { ++it; } } saveGameState(); break; } default: break; } } void Game::saveGameState() { if (gameState == GAME_STATE_NORMAL) { setGameState(GAME_STATE_MAINTAIN); } std::cout << "Saving server..." << std::endl; for (const auto& it : players) { it.second->loginPosition = it.second->getPosition(); IOLoginData::savePlayer(it.second); } Map::save(); g_databaseTasks.flush(); if (gameState == GAME_STATE_MAINTAIN) { setGameState(GAME_STATE_NORMAL); } } bool Game::loadMainMap(const std::string& filename) { Monster::despawnRange = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRANGE); Monster::despawnRadius = g_config.getNumber(ConfigManager::DEFAULT_DESPAWNRADIUS); return map.loadMap("data/world/" + filename + ".otbm", true); } void Game::loadMap(const std::string& path) { map.loadMap(path, false); } Cylinder* Game::internalGetCylinder(Player* player, const Position& pos) const { if (pos.x != 0xFFFF) { return map.getTile(pos); } //container if (pos.y & 0x40) { uint8_t from_cid = pos.y & 0x0F; return player->getContainerByID(from_cid); } //inventory return player; } Thing* Game::internalGetThing(Player* player, const Position& pos, int32_t index, uint32_t spriteId, stackPosType_t type) const { if (pos.x != 0xFFFF) { Tile* tile = map.getTile(pos); if (!tile) { return nullptr; } Thing* thing; switch (type) { case STACKPOS_LOOK: { return tile->getTopVisibleThing(player); } case STACKPOS_MOVE: { Item* item = tile->getTopDownItem(); if (item && item->isMoveable()) { thing = item; } else { thing = tile->getTopVisibleCreature(player); } break; } case STACKPOS_USEITEM: { thing = tile->getUseItem(index); break; } case STACKPOS_TOPDOWN_ITEM: { thing = tile->getTopDownItem(); break; } case STACKPOS_USETARGET: { thing = tile->getTopVisibleCreature(player); if (!thing) { thing = tile->getUseItem(index); } break; } default: { thing = nullptr; break; } } if (player && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //do extra checks here if the thing is accessible if (thing && thing->getItem()) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { thing = nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { thing = nullptr; } } } } return thing; } //container if (pos.y & 0x40) { uint8_t fromCid = pos.y & 0x0F; Container* parentContainer = player->getContainerByID(fromCid); if (!parentContainer) { return nullptr; } if (parentContainer->getID() == ITEM_BROWSEFIELD) { Tile* tile = parentContainer->getTile(); if (tile && tile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { if (tile->hasProperty(CONST_PROP_ISVERTICAL)) { if (player->getPosition().x + 1 == tile->getPosition().x) { return nullptr; } } else { // horizontal if (player->getPosition().y + 1 == tile->getPosition().y) { return nullptr; } } } } uint8_t slot = pos.z; return parentContainer->getItemByIndex(player->getContainerIndex(fromCid) + slot); } else if (pos.y == 0 && pos.z == 0) { const ItemType& it =
Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return nullptr; } int32_t subType; if (it.isFluidContainer() && index < static_cast<int32_t>(sizeof(reverseFluidMap) / sizeof(uint8_t))) { subType = reverseFluidMap[index]; } else { subType = -1; } return findItemOfType(player, it.id, true, subType); } //inventory slots_t slot = static_cast<slots_t>(pos.y); return player->getInventoryItem(slot); } void Game::internalGetPosition(Item* item, Position& pos, uint8_t& stackpos) { pos.x = 0; pos.y = 0; pos.z = 0; stackpos = 0; Cylinder* topParent = item->getTopParent(); if (topParent) { if (Player* player = dynamic_cast<Player*>(topParent)) { pos.x = 0xFFFF; Container* container = dynamic_cast<Container*>(item->getParent()); if (container) { pos.y = static_cast<uint16_t>(0x40) | static_cast<uint16_t>(player->getContainerID(container)); pos.z = container->getThingIndex(item); stackpos = pos.z; } else { pos.y = player->getThingIndex(item); stackpos = pos.y; } } else if (Tile* tile = topParent->getTile()) { pos = tile->getPosition(); stackpos = tile->getThingIndex(item); } } } Creature* Game::getCreatureByID(uint32_t id) { if (id <= Player::playerAutoID) { return getPlayerByID(id); } else if (id <= Monster::monsterAutoID) { return getMonsterByID(id); } else if (id <= Npc::npcAutoID) { return getNpcByID(id); } return nullptr; } Monster* Game::getMonsterByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = monsters.find(id); if (it == monsters.end()) { return nullptr; } return it->second; } Npc* Game::getNpcByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = npcs.find(id); if (it == npcs.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByID(uint32_t id) { if (id == 0) { return nullptr; } auto it = players.find(id); if (it == players.end()) { return nullptr; } return it->second; } Creature* Game::getCreatureByName(const std::string& s) { if (s.empty()) { return nullptr; } const std::string& lowerCaseName = asLowerCaseString(s); auto m_it = mappedPlayerNames.find(lowerCaseName); if (m_it != mappedPlayerNames.end()) { return m_it->second; } for (const auto& it : npcs) { if (lowerCaseName == asLowerCaseString(it.second->getName())) { return it.second; } } for (const auto& it : monsters) { if (lowerCaseName == asLowerCaseString(it.second->getName())) { return it.second; } } return nullptr; } Npc* Game::getNpcByName(const std::string& s) { if (s.empty()) { return nullptr; } const char* npcName = s.c_str(); for (const auto& it : npcs) { if (strcasecmp(npcName, it.second->getName().c_str()) == 0) { return it.second; } } return nullptr; } Player* Game::getPlayerByName(const std::string& s) { if (s.empty()) { return nullptr; } auto it = mappedPlayerNames.find(asLowerCaseString(s)); if (it == mappedPlayerNames.end()) { return nullptr; } return it->second; } Player* Game::getPlayerByGUID(const uint32_t& guid) { if (guid == 0) { return nullptr; } for (const auto& it : players) { if (guid == it.second->getGUID()) { return it.second; } } return nullptr; } ReturnValue Game::getPlayerByNameWildcard(const std::string& s, Player*& player) { size_t strlen = s.length(); if (strlen == 0 || strlen > 20) { return RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } if (s.back() == '~') { const std::string& query = asLowerCaseString(s.substr(0, strlen - 1)); std::string result; ReturnValue ret = wildcardTree.findOne(query, result); if (ret != RETURNVALUE_NOERROR) { return ret; } player = getPlayerByName(result); } else { player = getPlayerByName(s); } if (!player) { return 
RETURNVALUE_PLAYERWITHTHISNAMEISNOTONLINE; } return RETURNVALUE_NOERROR; } Player* Game::getPlayerByAccount(uint32_t acc) { for (const auto& it : players) { if (it.second->getAccount() == acc) { return it.second; } } return nullptr; } bool Game::internalPlaceCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (creature->getParent() != nullptr) { return false; } if (!map.placeCreature(pos, creature, extendedPos, forced)) { return false; } creature->incrementReferenceCounter(); creature->setID(); creature->addList(); return true; } bool Game::placeCreature(Creature* creature, const Position& pos, bool extendedPos /*=false*/, bool forced /*= false*/) { if (!internalPlaceCreature(creature, pos, extendedPos, forced)) { return false; } SpectatorHashSet spectators; map.getSpectators(spectators, creature->getPosition(), true); for (Creature* spectator : spectators) { if (Player* tmpPlayer = spectator->getPlayer()) { tmpPlayer->sendCreatureAppear(creature, creature->getPosition(), true); } } for (Creature* spectator : spectators) { spectator->onCreatureAppear(creature, true); } creature->getParent()->postAddNotification(creature, nullptr, 0); addCreatureCheck(creature); creature->onPlacedCreature(); return true; } bool Game::removeCreature(Creature* creature, bool isLogout/* = true*/) { if (creature->isRemoved()) { return false; } Tile* tile = creature->getTile(); std::vector<int32_t> oldStackPosVector; SpectatorHashSet spectators; map.getSpectators(spectators, tile->getPosition(), true); for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { oldStackPosVector.push_back(player->canSeeCreature(creature) ? tile->getStackposOfCreature(player, creature) : -1); } } tile->removeCreature(creature); const Position& tilePosition = tile->getPosition(); //send to client size_t i = 0; for (Creature* spectator : spectators) { if (Player* player = spectator->getPlayer()) { player->sendRemoveTileThing(tilePosition, oldStackPosVector[i++]); } } //event method for (Creature* spectator : spectators) { spectator->onRemoveCreature(creature, isLogout); } creature->getParent()->postRemoveNotification(creature, nullptr, 0); creature->removeList(); creature->setRemoved(); ReleaseCreature(creature); removeCreatureCheck(creature); for (Creature* summon : creature->summons) { summon->setSkillLoss(false); removeCreature(summon); } return true; } void Game::playerMoveThing(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Creature* movingCreature = thing->getCreature()) { Tile* tile = map.getTile(toPos); if (!tile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (Position::areInRange<1, 1, 0>(movingCreature->getPosition(), player->getPosition())) { SchedulerTask* task = createSchedulerTask(1000, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreature->getPosition(), tile->getPosition())); player->setNextActionTask(task); } else { playerMoveCreature(player, movingCreature, 
movingCreature->getPosition(), tile); } } else if (thing->getItem()) { Cylinder* toCylinder = internalGetCylinder(player, toPos); if (!toCylinder) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, thing->getItem(), toCylinder); } } void Game::playerMoveCreatureByID(uint32_t playerId, uint32_t movingCreatureId, const Position& movingCreatureOrigPos, const Position& toPos) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* movingCreature = getCreatureByID(movingCreatureId); if (!movingCreature) { return; } Tile* toTile = map.getTile(toPos); if (!toTile) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } playerMoveCreature(player, movingCreature, movingCreatureOrigPos, toTile); } void Game::playerMoveCreature(Player* player, Creature* movingCreature, const Position& movingCreatureOrigPos, Tile* toTile) { if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextActionTask(task); return; } player->setNextActionTask(nullptr); if (!Position::areInRange<1, 1, 0>(movingCreatureOrigPos, player->getPosition())) { //need to walk to the creature first before moving it std::forward_list<Direction> listDir; if (player->getPathTo(movingCreatureOrigPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(1500, std::bind(&Game::playerMoveCreatureByID, this, player->getID(), movingCreature->getID(), movingCreatureOrigPos, toTile->getPosition())); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } if ((!movingCreature->isPushable() && !player->hasFlag(PlayerFlag_CanPushAllCreatures)) || (movingCreature->isInGhostMode() && !player->isAccessPlayer())) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } //check throw distance const Position& movingCreaturePos = movingCreature->getPosition(); const Position& toPos = toTile->getPosition(); if ((Position::getDistanceX(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceY(movingCreaturePos, toPos) > movingCreature->getThrowRange()) || (Position::getDistanceZ(movingCreaturePos, toPos) * 4 > movingCreature->getThrowRange())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (player != movingCreature) { if (toTile->hasFlag(TILESTATE_BLOCKPATH)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } else if ((movingCreature->getZone() == ZONE_PROTECTION && !toTile->hasFlag(TILESTATE_PROTECTIONZONE)) || (movingCreature->getZone() == ZONE_NOPVP && !toTile->hasFlag(TILESTATE_NOPVPZONE))) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } else { if (CreatureVector* tileCreatures = toTile->getCreatures()) { for (Creature* tileCreature : *tileCreatures) { if (!tileCreature->isInGhostMode()) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } Npc* movingNpc = movingCreature->getNpc(); if (movingNpc && !Spawns::isInZone(movingNpc->getMasterPos(), movingNpc->getMasterRadius(), toPos)) { player->sendCancelMessage(RETURNVALUE_NOTENOUGHROOM); return; } } } if (!g_events->eventPlayerOnMoveCreature(player, movingCreature, movingCreaturePos, toPos)) { 
return; } ReturnValue ret = internalMoveCreature(*movingCreature, *toTile); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } } ReturnValue Game::internalMoveCreature(Creature* creature, Direction direction, uint32_t flags /*= 0*/) { creature->setLastPosition(creature->getPosition()); const Position& currentPos = creature->getPosition(); Position destPos = getNextPosition(direction, currentPos); Player* player = creature->getPlayer(); bool diagonalMovement = (direction & DIRECTION_DIAGONAL_MASK) != 0; if (player && !diagonalMovement) { //try go up if (currentPos.z != 8 && creature->getTile()->hasHeight(3)) { Tile* tmpTile = map.getTile(currentPos.x, currentPos.y, currentPos.getZ() - 1); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.getZ() - 1); if (tmpTile && tmpTile->getGround() && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; if (!tmpTile->hasFlag(TILESTATE_FLOORCHANGE)) { player->setDirection(direction); destPos.z--; } } } } //try go down if (currentPos.z != 7 && currentPos.z == destPos.z) { Tile* tmpTile = map.getTile(destPos.x, destPos.y, destPos.z); if (tmpTile == nullptr || (tmpTile->getGround() == nullptr && !tmpTile->hasFlag(TILESTATE_BLOCKSOLID))) { tmpTile = map.getTile(destPos.x, destPos.y, destPos.z + 1); if (tmpTile && tmpTile->hasHeight(3)) { flags |= FLAG_IGNOREBLOCKITEM | FLAG_IGNOREBLOCKCREATURE; player->setDirection(direction); destPos.z++; } } } } Tile* toTile = map.getTile(destPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } return internalMoveCreature(*creature, *toTile, flags); } ReturnValue Game::internalMoveCreature(Creature& creature, Tile& toTile, uint32_t flags /*= 0*/) { //check if we can move the creature to the destination ReturnValue ret = toTile.queryAdd(0, creature, 1, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(creature, toTile); if (creature.getParent() != &toTile) { return RETURNVALUE_NOERROR; } int32_t index = 0; Item* toItem = nullptr; Tile* subCylinder = nullptr; Tile* toCylinder = &toTile; Tile* fromCylinder = nullptr; uint32_t n = 0; while ((subCylinder = toCylinder->queryDestination(index, creature, &toItem, flags)) != toCylinder) { map.moveCreature(creature, *subCylinder); if (creature.getParent() != subCylinder) { //could happen if a script moves the creature fromCylinder = nullptr; break; } fromCylinder = toCylinder; toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++n >= MAP_MAX_LAYERS) { break; } } if (fromCylinder) { const Position& fromPosition = fromCylinder->getPosition(); const Position& toPosition = toCylinder->getPosition(); if (fromPosition.z != toPosition.z && (fromPosition.x != toPosition.x || fromPosition.y != toPosition.y)) { Direction dir = getDirectionTo(fromPosition, toPosition); if ((dir & DIRECTION_DIAGONAL_MASK) == 0) { internalCreatureTurn(&creature, dir); } } } return RETURNVALUE_NOERROR; } void Game::playerMoveItemByPlayerID(uint32_t playerId, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count) { Player* player = getPlayerByID(playerId); if (!player) { return; } playerMoveItem(player, fromPos, spriteId, fromStackPos, toPos, count, nullptr, nullptr); } void Game::playerMoveItem(Player* player, const Position& fromPos, uint16_t spriteId, uint8_t fromStackPos, const Position& toPos, uint8_t count, Item* item, Cylinder* toCylinder) { if
(!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextActionTask(task); return; } player->setNextActionTask(nullptr); if (item == nullptr) { uint8_t fromIndex = 0; if (fromPos.x == 0xFFFF) { if (fromPos.y & 0x40) { fromIndex = fromPos.z; } else { fromIndex = static_cast<uint8_t>(fromPos.y); } } else { fromIndex = fromStackPos; } Thing* thing = internalGetThing(player, fromPos, fromIndex, 0, STACKPOS_MOVE); if (!thing || !thing->getItem()) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } item = thing->getItem(); } if (item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Cylinder* fromCylinder = internalGetCylinder(player, fromPos); if (fromCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } if (toCylinder == nullptr) { toCylinder = internalGetCylinder(player, toPos); if (toCylinder == nullptr) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!item->isPushable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) { player->sendCancelMessage(RETURNVALUE_NOTMOVEABLE); return; } const Position& playerPos = player->getPosition(); const Position& mapFromPos = fromCylinder->getTile()->getPosition(); if (playerPos.z != mapFromPos.z) { player->sendCancelMessage(playerPos.z > mapFromPos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS); return; } if (!Position::areInRange<1, 1>(playerPos, mapFromPos)) { //need to walk to the item first before using it std::forward_list<Direction> listDir; if (player->getPathTo(item->getPosition(), listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), fromPos, spriteId, fromStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } const Tile* toCylinderTile = toCylinder->getTile(); const Position& mapToPos = toCylinderTile->getPosition(); //hangable item specific code if (item->isHangable() && toCylinderTile->hasFlag(TILESTATE_SUPPORTS_HANGABLE)) { //destination supports hangable objects so need to move there first bool vertical = toCylinderTile->hasProperty(CONST_PROP_ISVERTICAL); if (vertical) { if (playerPos.x + 1 == mapToPos.x) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } else { // horizontal if (playerPos.y + 1 == mapToPos.y) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } } if (!Position::areInRange<1, 1, 0>(playerPos, mapToPos)) { Position walkPos = mapToPos; if (vertical) { walkPos.x++; } else { walkPos.y++; } Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1>(mapFromPos, playerPos) && !Position::areInRange<1, 1, 0>(mapFromPos, walkPos)) { //need to pickup the item first Item* moveItem = nullptr; ReturnValue ret = internalMoveItem(fromCylinder, player, INDEX_WHEREEVER, item, count, &moveItem); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::forward_list<Direction> listDir; if (player->getPathTo(walkPos, listDir, 0, 0, true, 
true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerMoveItemByPlayerID, this, player->getID(), itemPos, spriteId, itemStackPos, toPos, count)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } } if ((Position::getDistanceX(playerPos, mapToPos) > item->getThrowRange()) || (Position::getDistanceY(playerPos, mapToPos) > item->getThrowRange()) || (Position::getDistanceZ(mapFromPos, mapToPos) * 4 > item->getThrowRange())) { player->sendCancelMessage(RETURNVALUE_DESTINATIONOUTOFREACH); return; } if (!canThrowObjectTo(mapFromPos, mapToPos)) { player->sendCancelMessage(RETURNVALUE_CANNOTTHROW); return; } if (!g_events->eventPlayerOnMoveItem(player, item, count, fromPos, toPos, fromCylinder, toCylinder)) { return; } uint8_t toIndex = 0; if (toPos.x == 0xFFFF) { if (toPos.y & 0x40) { toIndex = toPos.z; } else { toIndex = static_cast<uint8_t>(toPos.y); } } ReturnValue ret = internalMoveItem(fromCylinder, toCylinder, toIndex, item, count, nullptr, 0, player); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); } else { g_events->eventPlayerOnItemMoved(player, item, count, fromPos, toPos, fromCylinder, toCylinder); } } ReturnValue Game::internalMoveItem(Cylinder* fromCylinder, Cylinder* toCylinder, int32_t index, Item* item, uint32_t count, Item** _moveItem, uint32_t flags /*= 0*/, Creature* actor/* = nullptr*/, Item* tradeItem/* = nullptr*/) { Tile* fromTile = fromCylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == fromCylinder) { fromCylinder = fromTile; } } Item* toItem = nullptr; Cylinder* subCylinder; int floorN = 0; while ((subCylinder = toCylinder->queryDestination(index, *item, &toItem, flags)) != toCylinder) { toCylinder = subCylinder; flags = 0; //to prevent infinite loop if (++floorN >= MAP_MAX_LAYERS) { break; } } //destination is the same as the source? 
if (item == toItem) { return RETURNVALUE_NOERROR; //silently ignore move } //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, count, flags, actor); if (ret == RETURNVALUE_NEEDEXCHANGE) { //check if we can add it to source cylinder ret = fromCylinder->queryAdd(fromCylinder->getThingIndex(item), *toItem, toItem->getItemCount(), 0); if (ret == RETURNVALUE_NOERROR) { //check how much we can move uint32_t maxExchangeQueryCount = 0; ReturnValue retExchangeMaxCount = fromCylinder->queryMaxCount(INDEX_WHEREEVER, *toItem, toItem->getItemCount(), maxExchangeQueryCount, 0); if (retExchangeMaxCount != RETURNVALUE_NOERROR && maxExchangeQueryCount == 0) { return retExchangeMaxCount; } if (toCylinder->queryRemove(*toItem, toItem->getItemCount(), flags) == RETURNVALUE_NOERROR) { int32_t oldToItemIndex = toCylinder->getThingIndex(toItem); toCylinder->removeThing(toItem, toItem->getItemCount()); fromCylinder->addThing(toItem); if (oldToItemIndex != -1) { toCylinder->postRemoveNotification(toItem, fromCylinder, oldToItemIndex); } int32_t newToItemIndex = fromCylinder->getThingIndex(toItem); if (newToItemIndex != -1) { fromCylinder->postAddNotification(toItem, toCylinder, newToItemIndex); } ret = toCylinder->queryAdd(index, *item, count, flags); toItem = nullptr; } } } if (ret != RETURNVALUE_NOERROR) { return ret; } //check how much we can move uint32_t maxQueryCount = 0; ReturnValue retMaxCount = toCylinder->queryMaxCount(index, *item, count, maxQueryCount, flags); if (retMaxCount != RETURNVALUE_NOERROR && maxQueryCount == 0) { return retMaxCount; } uint32_t m; if (item->isStackable()) { m = std::min<uint32_t>(count, maxQueryCount); } else { m = maxQueryCount; } Item* moveItem = item; //check if we can remove this item ret = fromCylinder->queryRemove(*item, m, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } if (tradeItem) { if (toCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } Cylinder* tmpCylinder = toCylinder->getParent(); while (tmpCylinder) { if (tmpCylinder->getItem() == tradeItem) { return RETURNVALUE_NOTENOUGHROOM; } tmpCylinder = tmpCylinder->getParent(); } } //remove the item int32_t itemIndex = fromCylinder->getThingIndex(item); Item* updateItem = nullptr; fromCylinder->removeThing(item, m); //update item(s) if (item->isStackable()) { uint32_t n; if (item->equals(toItem)) { n = std::min<uint32_t>(100 - toItem->getItemCount(), m); toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); updateItem = toItem; } else { n = 0; } int32_t newCount = m - n; if (newCount > 0) { moveItem = item->clone(); moveItem->setItemCount(newCount); } else { moveItem = nullptr; } if (item->isRemoved()) { ReleaseItem(item); } } //add item if (moveItem /*m - n > 0*/) { toCylinder->addThing(index, moveItem); } if (itemIndex != -1) { fromCylinder->postRemoveNotification(item, toCylinder, itemIndex); } if (moveItem) { int32_t moveItemIndex = toCylinder->getThingIndex(moveItem); if (moveItemIndex != -1) { toCylinder->postAddNotification(moveItem, fromCylinder, moveItemIndex); } } if (updateItem) { int32_t updateItemIndex = toCylinder->getThingIndex(updateItem); if (updateItemIndex != -1) { toCylinder->postAddNotification(updateItem, fromCylinder, updateItemIndex); } } if (_moveItem) { if (moveItem) { *_moveItem = moveItem; } else { *_moveItem = item; } } //we could not move all, inform the player if (item->isStackable() && maxQueryCount < count) { return retMaxCount; } return ret; } ReturnValue Game::internalAddItem(Cylinder* toCylinder, 
Item* item, int32_t index /*= INDEX_WHEREEVER*/, uint32_t flags/* = 0*/, bool test/* = false*/) { uint32_t remainderCount = 0; return internalAddItem(toCylinder, item, index, flags, test, remainderCount); } ReturnValue Game::internalAddItem(Cylinder* toCylinder, Item* item, int32_t index, uint32_t flags, bool test, uint32_t& remainderCount) { if (toCylinder == nullptr || item == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Cylinder* destCylinder = toCylinder; Item* toItem = nullptr; toCylinder = toCylinder->queryDestination(index, *item, &toItem, flags); //check if we can add this item ReturnValue ret = toCylinder->queryAdd(index, *item, item->getItemCount(), flags); if (ret != RETURNVALUE_NOERROR) { return ret; } /* Check if we can add the whole amount; we do this by checking against the original cylinder, since the queryDestination can return a cylinder that might only hold a part of the full amount. */ uint32_t maxQueryCount = 0; ret = destCylinder->queryMaxCount(INDEX_WHEREEVER, *item, item->getItemCount(), maxQueryCount, flags); if (ret != RETURNVALUE_NOERROR) { return ret; } if (test) { return RETURNVALUE_NOERROR; } if (item->isStackable() && item->equals(toItem)) { uint32_t m = std::min<uint32_t>(item->getItemCount(), maxQueryCount); uint32_t n = std::min<uint32_t>(100 - toItem->getItemCount(), m); toCylinder->updateThing(toItem, toItem->getID(), toItem->getItemCount() + n); int32_t count = m - n; if (count > 0) { if (item->getItemCount() != count) { Item* remainderItem = item->clone(); remainderItem->setItemCount(count); if (internalAddItem(destCylinder, remainderItem, INDEX_WHEREEVER, flags, false) != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); remainderCount = count; } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } } else { //fully merged with toItem, item will be destroyed item->onRemoved(); ReleaseItem(item); int32_t itemIndex = toCylinder->getThingIndex(toItem); if (itemIndex != -1) { toCylinder->postAddNotification(toItem, nullptr, itemIndex); } } } else { toCylinder->addThing(index, item); int32_t itemIndex = toCylinder->getThingIndex(item); if (itemIndex != -1) { toCylinder->postAddNotification(item, nullptr, itemIndex); } } return RETURNVALUE_NOERROR; } ReturnValue Game::internalRemoveItem(Item* item, int32_t count /*= -1*/, bool test /*= false*/, uint32_t flags /*= 0*/) { Cylinder* cylinder = item->getParent(); if (cylinder == nullptr) { return RETURNVALUE_NOTPOSSIBLE; } Tile* fromTile = cylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == cylinder) { cylinder = fromTile; } } if (count == -1) { count = item->getItemCount(); } //check if we can remove this item ReturnValue ret = cylinder->queryRemove(*item, count, flags | FLAG_IGNORENOTMOVEABLE); if (ret != RETURNVALUE_NOERROR) { return ret; } if (!item->canRemove()) { return RETURNVALUE_NOTPOSSIBLE; } if (!test) { int32_t index = cylinder->getThingIndex(item); //remove the item cylinder->removeThing(item, count); if (item->isRemoved()) { item->onRemoved(); ReleaseItem(item); } cylinder->postRemoveNotification(item, nullptr, index); } return RETURNVALUE_NOERROR; } ReturnValue Game::internalPlayerAddItem(Player* player, Item* item, bool dropOnMap /*= true*/, slots_t slot /*= CONST_SLOT_WHEREEVER*/) { uint32_t remainderCount = 0; ReturnValue ret = internalAddItem(player, item, static_cast<int32_t>(slot), 0,
false, remainderCount); if (remainderCount != 0) { Item* remainderItem = Item::CreateItem(item->getID(), remainderCount); ReturnValue remaindRet = internalAddItem(player->getTile(), remainderItem, INDEX_WHEREEVER, FLAG_NOLIMIT); if (remaindRet != RETURNVALUE_NOERROR) { ReleaseItem(remainderItem); } } if (ret != RETURNVALUE_NOERROR && dropOnMap) { ret = internalAddItem(player->getTile(), item, INDEX_WHEREEVER, FLAG_NOLIMIT); } return ret; } Item* Game::findItemOfType(Cylinder* cylinder, uint16_t itemId, bool depthSearch /*= true*/, int32_t subType /*= -1*/) const { if (cylinder == nullptr) { return nullptr; } std::vector<Container*> containers; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } if (depthSearch) { Container* container = item->getContainer(); if (container) { containers.push_back(container); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { if (item->getID() == itemId && (subType == -1 || subType == item->getSubType())) { return item; } Container* subContainer = item->getContainer(); if (subContainer) { containers.push_back(subContainer); } } } return nullptr; } bool Game::removeMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (cylinder == nullptr) { return false; } if (money == 0) { return true; } std::vector<Container*> containers; std::multimap<uint32_t, Item*> moneyMap; uint64_t moneyCount = 0; for (size_t i = cylinder->getFirstIndex(), j = cylinder->getLastIndex(); i < j; ++i) { Thing* thing = cylinder->getThing(i); if (!thing) { continue; } Item* item = thing->getItem(); if (!item) { continue; } Container* container = item->getContainer(); if (container) { containers.push_back(container); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } size_t i = 0; while (i < containers.size()) { Container* container = containers[i++]; for (Item* item : container->getItemList()) { Container* tmpContainer = item->getContainer(); if (tmpContainer) { containers.push_back(tmpContainer); } else { const uint32_t worth = item->getWorth(); if (worth != 0) { moneyCount += worth; moneyMap.emplace(worth, item); } } } } if (moneyCount < money) { return false; } for (const auto& moneyEntry : moneyMap) { Item* item = moneyEntry.second; if (moneyEntry.first < money) { internalRemoveItem(item); money -= moneyEntry.first; } else if (moneyEntry.first > money) { const uint32_t worth = moneyEntry.first / item->getItemCount(); const uint32_t removeCount = std::ceil(money / static_cast<double>(worth)); addMoney(cylinder, (worth * removeCount) - money, flags); internalRemoveItem(item, removeCount); break; } else { internalRemoveItem(item); break; } } return true; } void Game::addMoney(Cylinder* cylinder, uint64_t money, uint32_t flags /*= 0*/) { if (money == 0) { return; } uint32_t crystalCoins = money / 10000; money -= crystalCoins * 10000; while (crystalCoins > 0) { const uint16_t count = std::min<uint32_t>(100, crystalCoins); Item* remaindItem = Item::CreateItem(ITEM_CRYSTAL_COIN, count); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, 
FLAG_NOLIMIT); } crystalCoins -= count; } uint16_t platinumCoins = money / 100; if (platinumCoins != 0) { Item* remaindItem = Item::CreateItem(ITEM_PLATINUM_COIN, platinumCoins); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } money -= platinumCoins * 100; } if (money != 0) { Item* remaindItem = Item::CreateItem(ITEM_GOLD_COIN, money); ReturnValue ret = internalAddItem(cylinder, remaindItem, INDEX_WHEREEVER, flags); if (ret != RETURNVALUE_NOERROR) { internalAddItem(cylinder->getTile(), remaindItem, INDEX_WHEREEVER, FLAG_NOLIMIT); } } } Item* Game::transformItem(Item* item, uint16_t newId, int32_t newCount /*= -1*/) { if (item->getID() == newId && (newCount == -1 || (newCount == item->getSubType() && newCount != 0))) { //chargeless item placed on map = infinite return item; } Cylinder* cylinder = item->getParent(); if (cylinder == nullptr) { return nullptr; } Tile* fromTile = cylinder->getTile(); if (fromTile) { auto it = browseFields.find(fromTile); if (it != browseFields.end() && it->second == cylinder) { cylinder = fromTile; } } int32_t itemIndex = cylinder->getThingIndex(item); if (itemIndex == -1) { return item; } if (!item->canTransform()) { return item; } const ItemType& newType = Item::items[newId]; if (newType.id == 0) { return item; } const ItemType& curType = Item::items[item->getID()]; if (curType.alwaysOnTop != newType.alwaysOnTop) { //This only occurs when you transform items on tiles from a downItem to a topItem (or vice versa) //Remove the old, and add the new cylinder->removeThing(item, item->getItemCount()); cylinder->postRemoveNotification(item, cylinder, itemIndex); item->setID(newId); if (newCount != -1) { item->setSubType(newCount); } cylinder->addThing(item); Cylinder* newParent = item->getParent(); if (newParent == nullptr) { ReleaseItem(item); return nullptr; } newParent->postAddNotification(item, cylinder, newParent->getThingIndex(item)); return item; } if (curType.type == newType.type) { //Both items have the same type so we can safely change id/subtype if (newCount == 0 && (item->isStackable() || item->hasAttribute(ITEM_ATTRIBUTE_CHARGES))) { if (item->isStackable()) { internalRemoveItem(item); return nullptr; } else { int32_t newItemId = newId; if (curType.id == newType.id) { newItemId = curType.decayTo; } if (newItemId < 0) { internalRemoveItem(item); return nullptr; } else if (newItemId != newId) { //Replacing the old item with the new while maintaining the old position Item* newItem = Item::CreateItem(newItemId, 1); if (newItem == nullptr) { return nullptr; } cylinder->replaceThing(itemIndex, newItem); cylinder->postAddNotification(newItem, cylinder, itemIndex); item->setParent(nullptr); cylinder->postRemoveNotification(item, cylinder, itemIndex); ReleaseItem(item); return newItem; } else { return transformItem(item, newItemId); } } } else { cylinder->postRemoveNotification(item, cylinder, itemIndex); uint16_t itemId = item->getID(); int32_t count = item->getSubType(); if (curType.id != newType.id) { if (newType.group != curType.group) { item->setDefaultSubtype(); } itemId = newId; } if (newCount != -1 && newType.hasSubType()) { count = newCount; } cylinder->updateThing(item, itemId, count); cylinder->postAddNotification(item, cylinder, itemIndex); return item; } } //Replacing the old item with the new while maintaining the old position Item* newItem; if (newCount == -1) { newItem =
Item::CreateItem(newId); } else { newItem = Item::CreateItem(newId, newCount); } if (newItem == nullptr) { return nullptr; } cylinder->replaceThing(itemIndex, newItem); cylinder->postAddNotification(newItem, cylinder, itemIndex); item->setParent(nullptr); cylinder->postRemoveNotification(item, cylinder, itemIndex); ReleaseItem(item); return newItem; } ReturnValue Game::internalTeleport(Thing* thing, const Position& newPos, bool pushMove/* = true*/, uint32_t flags /*= 0*/) { if (newPos == thing->getPosition()) { return RETURNVALUE_NOERROR; } else if (thing->isRemoved()) { return RETURNVALUE_NOTPOSSIBLE; } Tile* toTile = map.getTile(newPos); if (!toTile) { return RETURNVALUE_NOTPOSSIBLE; } if (Creature* creature = thing->getCreature()) { ReturnValue ret = toTile->queryAdd(0, *creature, 1, FLAG_NOLIMIT); if (ret != RETURNVALUE_NOERROR) { return ret; } map.moveCreature(*creature, *toTile, !pushMove); return RETURNVALUE_NOERROR; } else if (Item* item = thing->getItem()) { return internalMoveItem(item->getParent(), toTile, INDEX_WHEREEVER, item, item->getItemCount(), nullptr, flags); } return RETURNVALUE_NOTPOSSIBLE; } Item* searchForItem(Container* container, uint16_t itemId) { for (ContainerIterator it = container->iterator(); it.hasNext(); it.advance()) { if ((*it)->getID() == itemId) { return *it; } } return nullptr; } slots_t getSlotType(const ItemType& it) { slots_t slot = CONST_SLOT_RIGHT; if (it.weaponType != WeaponType_t::WEAPON_SHIELD) { int32_t slotPosition = it.slotPosition; if (slotPosition & SLOTP_HEAD) { slot = CONST_SLOT_HEAD; } else if (slotPosition & SLOTP_NECKLACE) { slot = CONST_SLOT_NECKLACE; } else if (slotPosition & SLOTP_ARMOR) { slot = CONST_SLOT_ARMOR; } else if (slotPosition & SLOTP_LEGS) { slot = CONST_SLOT_LEGS; } else if (slotPosition & SLOTP_FEET) { slot = CONST_SLOT_FEET ; } else if (slotPosition & SLOTP_RING) { slot = CONST_SLOT_RING; } else if (slotPosition & SLOTP_AMMO) { slot = CONST_SLOT_AMMO; } else if (slotPosition & SLOTP_TWO_HAND || slotPosition & SLOTP_LEFT) { slot = CONST_SLOT_LEFT; } } return slot; } //Implementation of player invoked events void Game::playerEquipItem(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Item* item = player->getInventoryItem(CONST_SLOT_BACKPACK); if (!item) { return; } Container* backpack = item->getContainer(); if (!backpack) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); slots_t slot = getSlotType(it); Item* slotItem = player->getInventoryItem(slot); Item* equipItem = searchForItem(backpack, it.id); if (slotItem && slotItem->getID() == it.id && (!it.stackable || slotItem->getItemCount() == 100 || !equipItem)) { internalMoveItem(slotItem->getParent(), player, CONST_SLOT_WHEREEVER, slotItem, slotItem->getItemCount(), nullptr); } else if (equipItem) { internalMoveItem(equipItem->getParent(), player, slot, equipItem, equipItem->getItemCount(), nullptr); } } void Game::playerMove(uint32_t playerId, Direction direction) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); player->setNextWalkActionTask(nullptr); player->startAutoWalk(std::forward_list<Direction> { direction }); } bool Game::playerBroadcastMessage(Player* player, const std::string& text) const { if (!player->hasFlag(PlayerFlag_CanBroadcast)) { return false; } std::cout << "> " << player->getName() << " broadcasted: \"" << text << "\"." 
<< std::endl; for (const auto& it : players) { it.second->sendPrivateMessage(player, TALKTYPE_BROADCAST, text); } return true; } void Game::playerCreatePrivateChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player || !player->isPremium()) { return; } ChatChannel* channel = g_chat->createChannel(*player, CHANNEL_PRIVATE); if (!channel || !channel->addUser(*player)) { return; } player->sendCreatePrivateChannel(channel->getId(), channel->getName()); } void Game::playerChannelInvite(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* invitePlayer = getPlayerByName(name); if (!invitePlayer) { return; } if (player == invitePlayer) { return; } channel->invitePlayer(*player, *invitePlayer); } void Game::playerChannelExclude(uint32_t playerId, const std::string& name) { Player* player = getPlayerByID(playerId); if (!player) { return; } PrivateChatChannel* channel = g_chat->getPrivateChannel(*player); if (!channel) { return; } Player* excludePlayer = getPlayerByName(name); if (!excludePlayer) { return; } if (player == excludePlayer) { return; } channel->excludePlayer(*player, *excludePlayer); } void Game::playerRequestChannels(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendChannelsDialog(); } void Game::playerOpenChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } ChatChannel* channel = g_chat->addUserToChannel(*player, channelId); if (!channel) { return; } const InvitedMap* invitedUsers = channel->getInvitedUsers(); const UsersMap* users; if (!channel->isPublicChannel()) { users = &channel->getUsers(); } else { users = nullptr; } player->sendChannel(channel->getId(), channel->getName(), users, invitedUsers); } void Game::playerCloseChannel(uint32_t playerId, uint16_t channelId) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_chat->removeUserFromChannel(*player, channelId); } void Game::playerOpenPrivateChannel(uint32_t playerId, std::string& receiver) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!IOLoginData::formatPlayerName(receiver)) { player->sendCancelMessage("A player with this name does not exist."); return; } if (player->getName() == receiver) { player->sendCancelMessage("You cannot set up a private message channel with yourself."); return; } player->sendOpenPrivateChannel(receiver); } void Game::playerCloseNpcChannel(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } SpectatorHashSet spectators; map.getSpectators(spectators, player->getPosition()); for (Creature* spectator : spectators) { if (Npc* npc = spectator->getNpc()) { npc->onPlayerCloseChannel(player); } } } void Game::playerReceivePing(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->receivePing(); } void Game::playerReceivePingBack(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->sendPingBack(); } void Game::playerAutoWalk(uint32_t playerId, const std::forward_list<Direction>& listDir) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->resetIdleTime(); player->setNextWalkTask(nullptr); player->startAutoWalk(listDir); } void Game::playerStopAutoWalk(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { 
return; } player->stopWalk(); } void Game::playerUseItemEx(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint16_t fromSpriteId, const Position& toPos, uint8_t toStackPos, uint16_t toSpriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, fromPos, fromStackPos, fromSpriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != fromSpriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && toPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::forward_list<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItemEx, this, playerId, itemPos, itemStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItemEx, this, playerId, fromPos, fromStackPos, fromSpriteId, toPos, toStackPos, toSpriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, toPos, toStackPos, item, isHotkey); } void Game::playerUseItem(uint32_t playerId, const Position& pos, uint8_t stackPos, uint8_t index, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } bool isHotkey = (pos.x == 0xFFFF && pos.y == 0 && pos.z == 0); if (isHotkey && !g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { return; } Thing* thing = internalGetThing(player, pos, stackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } ReturnValue ret = g_actions->canUse(player, pos); if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { std::forward_list<Direction> listDir; if (player->getPathTo(pos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, 
player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextWalkActionTask(task); return; } ret = RETURNVALUE_THEREISNOWAY; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseItem, this, playerId, pos, stackPos, index, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItem(player, pos, index, item, isHotkey); } void Game::playerUseWithCreature(uint32_t playerId, const Position& fromPos, uint8_t fromStackPos, uint32_t creatureId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } if (!Position::areInRange<7, 5, 0>(creature->getPosition(), player->getPosition())) { return; } bool isHotkey = (fromPos.x == 0xFFFF && fromPos.y == 0 && fromPos.z == 0); if (!g_config.getBoolean(ConfigManager::AIMBOT_HOTKEY_ENABLED)) { if (creature->getPlayer() || isHotkey) { player->sendCancelMessage(RETURNVALUE_DIRECTPLAYERSHOOT); return; } } Thing* thing = internalGetThing(player, fromPos, fromStackPos, spriteId, STACKPOS_USEITEM); if (!thing) { player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE); return; } Item* item = thing->getItem(); if (!item || !item->isUseable() || item->getClientID() != spriteId) { player->sendCancelMessage(RETURNVALUE_CANNOTUSETHISOBJECT); return; } Position toPos = creature->getPosition(); Position walkToPos = fromPos; ReturnValue ret = g_actions->canUse(player, fromPos); if (ret == RETURNVALUE_NOERROR) { ret = g_actions->canUse(player, toPos, item); if (ret == RETURNVALUE_TOOFARAWAY) { walkToPos = toPos; } } if (ret != RETURNVALUE_NOERROR) { if (ret == RETURNVALUE_TOOFARAWAY) { Position itemPos = fromPos; uint8_t itemStackPos = fromStackPos; if (fromPos.x != 0xFFFF && Position::areInRange<1, 1, 0>(fromPos, player->getPosition()) && !Position::areInRange<1, 1, 0>(fromPos, toPos)) { Item* moveItem = nullptr; ret = internalMoveItem(item->getParent(), player, INDEX_WHEREEVER, item, item->getItemCount(), &moveItem); if (ret != RETURNVALUE_NOERROR) { player->sendCancelMessage(ret); return; } //changing the position since its now in the inventory of the player internalGetPosition(moveItem, itemPos, itemStackPos); } std::forward_list<Direction> listDir; if (player->getPathTo(walkToPos, listDir, 0, 1, true, true)) { g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir))); SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerUseWithCreature, this, playerId, itemPos, itemStackPos, creatureId, spriteId)); player->setNextWalkActionTask(task); } else { player->sendCancelMessage(RETURNVALUE_THEREISNOWAY); } return; } player->sendCancelMessage(ret); return; } if (!player->canDoAction()) { uint32_t delay = player->getNextActionTime(); SchedulerTask* task = createSchedulerTask(delay, std::bind(&Game::playerUseWithCreature, this, playerId, fromPos, fromStackPos, creatureId, spriteId)); player->setNextActionTask(task); return; } player->resetIdleTime(); player->setNextActionTask(nullptr); g_actions->useItemEx(player, fromPos, creature->getPosition(), creature->getParent()->getThingIndex(creature), item, isHotkey, creature); } void Game::playerCloseContainer(uint32_t playerId, uint8_t cid) { Player* 
void Game::playerMoveUpContainer(uint32_t playerId, uint8_t cid)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Container* container = player->getContainerByID(cid);
	if (!container) {
		return;
	}

	Container* parentContainer = dynamic_cast<Container*>(container->getRealParent());
	if (!parentContainer) {
		Tile* tile = container->getTile();
		if (!tile) {
			return;
		}

		auto it = browseFields.find(tile);
		if (it == browseFields.end()) {
			parentContainer = new Container(tile);
			parentContainer->incrementReferenceCounter();
			browseFields[tile] = parentContainer;
			g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
		} else {
			parentContainer = it->second;
		}
	}

	player->addContainer(cid, parentContainer);
	player->sendContainer(cid, parentContainer, parentContainer->hasParent(), player->getContainerIndex(cid));
}

void Game::playerUpdateContainer(uint32_t playerId, uint8_t cid)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Container* container = player->getContainerByID(cid);
	if (!container) {
		return;
	}

	player->sendContainer(cid, container, container->hasParent(), player->getContainerIndex(cid));
}

void Game::playerRotateItem(uint32_t playerId, const Position& pos, uint8_t stackPos, const uint16_t spriteId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
	if (!thing) {
		return;
	}

	Item* item = thing->getItem();
	if (!item || item->getClientID() != spriteId || !item->isRotatable() || item->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	if (pos.x != 0xFFFF && !Position::areInRange<1, 1, 0>(pos, player->getPosition())) {
		std::forward_list<Direction> listDir;
		if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
			g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));

			SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRotateItem, this,
			                      playerId, pos, stackPos, spriteId));
			player->setNextWalkActionTask(task);
		} else {
			player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
		}
		return;
	}

	uint16_t newId = Item::items[item->getID()].rotateTo;
	if (newId != 0) {
		transformItem(item, newId);
	}
}
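// Text edits are validated against the window id handed out when the edit
// dialog was opened: the id sent back by the client must match, and the text
// may not exceed the length announced at that time. This rejects stale or
// forged write requests.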
void Game::playerWriteItem(uint32_t playerId, uint32_t windowTextId, const std::string& text)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	uint16_t maxTextLength = 0;
	uint32_t internalWindowTextId = 0;

	Item* writeItem = player->getWriteItem(internalWindowTextId, maxTextLength);
	if (text.length() > maxTextLength || windowTextId != internalWindowTextId) {
		return;
	}

	if (!writeItem || writeItem->isRemoved()) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	Cylinder* topParent = writeItem->getTopParent();

	Player* owner = dynamic_cast<Player*>(topParent);
	if (owner && owner != player) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	if (!Position::areInRange<1, 1, 0>(writeItem->getPosition(), player->getPosition())) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_TEXTEDIT)) {
		if (!creatureEvent->executeTextEdit(player, writeItem, text)) {
			player->setWriteItem(nullptr);
			return;
		}
	}

	if (!text.empty()) {
		if (writeItem->getText() != text) {
			writeItem->setText(text);
			writeItem->setWriter(player->getName());
			writeItem->setDate(time(nullptr));
		}
	} else {
		writeItem->resetText();
		writeItem->resetWriter();
		writeItem->resetDate();
	}

	uint16_t newId = Item::items[writeItem->getID()].writeOnceItemId;
	if (newId != 0) {
		transformItem(writeItem, newId);
	}

	player->setWriteItem(nullptr);
}

void Game::playerBrowseField(uint32_t playerId, const Position& pos)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	const Position& playerPos = player->getPosition();
	if (playerPos.z != pos.z) {
		player->sendCancelMessage(playerPos.z > pos.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
		return;
	}

	if (!Position::areInRange<1, 1>(playerPos, pos)) {
		std::forward_list<Direction> listDir;
		if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
			g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));

			SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerBrowseField, this, playerId, pos));
			player->setNextWalkActionTask(task);
		} else {
			player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
		}
		return;
	}

	Tile* tile = map.getTile(pos);
	if (!tile) {
		return;
	}

	if (!g_events->eventPlayerOnBrowseField(player, pos)) {
		return;
	}

	Container* container;

	auto it = browseFields.find(tile);
	if (it == browseFields.end()) {
		container = new Container(tile);
		container->incrementReferenceCounter();
		browseFields[tile] = container;
		g_scheduler.addEvent(createSchedulerTask(30000, std::bind(&Game::decreaseBrowseFieldRef, this, tile->getPosition())));
	} else {
		container = it->second;
	}

	uint8_t dummyContainerId = 0xF - ((pos.x % 3) * 3 + (pos.y % 3));
	Container* openContainer = player->getContainerByID(dummyContainerId);
	if (openContainer) {
		player->onCloseContainer(openContainer);
		player->closeContainer(dummyContainerId);
	} else {
		player->addContainer(dummyContainerId, container);
		player->sendContainer(dummyContainerId, container, false, 0);
	}
}

void Game::playerSeekInContainer(uint32_t playerId, uint8_t containerId, uint16_t index)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Container* container = player->getContainerByID(containerId);
	if (!container || !container->hasPagination()) {
		return;
	}

	if ((index % container->capacity()) != 0 || index >= container->size()) {
		return;
	}

	player->setContainerIndex(containerId, index);
	player->sendContainer(containerId, container, container->hasParent(), index);
}

void Game::playerUpdateHouseWindow(uint32_t playerId, uint8_t listId, uint32_t windowTextId, const std::string& text)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	uint32_t internalWindowTextId;
	uint32_t internalListId;

	House* house = player->getEditHouse(internalWindowTextId, internalListId);
	if (house && house->canEditAccessList(internalListId, player) && internalWindowTextId == windowTextId && listId == 0) {
		house->setAccessList(internalListId, text);
	}

	player->setEditHouse(nullptr);
}
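// A trade request is validated step by step: the partner must exist, be in
// range and reachable by throw, the offered item must match the client sprite,
// be pickupable and not carry a unique id, both sides must be on the same
// floor, and neither the item nor any container holding it (or held by it) may
// already be part of another pending trade.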
void Game::playerRequestTrade(uint32_t playerId, const Position& pos, uint8_t stackPos, uint32_t tradePlayerId, uint16_t spriteId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Player* tradePartner = getPlayerByID(tradePlayerId);
	if (!tradePartner || tradePartner == player) {
		player->sendTextMessage(MESSAGE_INFO_DESCR, "Sorry, not possible.");
		return;
	}

	if (!Position::areInRange<2, 2, 0>(tradePartner->getPosition(), player->getPosition())) {
		std::ostringstream ss;
		ss << tradePartner->getName() << " tells you to move closer.";
		player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
		return;
	}

	if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
		player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
		return;
	}

	Thing* tradeThing = internalGetThing(player, pos, stackPos, 0, STACKPOS_TOPDOWN_ITEM);
	if (!tradeThing) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	Item* tradeItem = tradeThing->getItem();
	if (tradeItem->getClientID() != spriteId || !tradeItem->isPickupable() || tradeItem->hasAttribute(ITEM_ATTRIBUTE_UNIQUEID)) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	const Position& playerPosition = player->getPosition();
	const Position& tradeItemPosition = tradeItem->getPosition();
	if (playerPosition.z != tradeItemPosition.z) {
		player->sendCancelMessage(playerPosition.z > tradeItemPosition.z ? RETURNVALUE_FIRSTGOUPSTAIRS : RETURNVALUE_FIRSTGODOWNSTAIRS);
		return;
	}

	if (!Position::areInRange<1, 1>(tradeItemPosition, playerPosition)) {
		std::forward_list<Direction> listDir;
		if (player->getPathTo(pos, listDir, 0, 1, true, true)) {
			g_dispatcher.addTask(createTask(std::bind(&Game::playerAutoWalk, this, player->getID(), listDir)));

			SchedulerTask* task = createSchedulerTask(400, std::bind(&Game::playerRequestTrade, this,
			                      playerId, pos, stackPos, tradePlayerId, spriteId));
			player->setNextWalkActionTask(task);
		} else {
			player->sendCancelMessage(RETURNVALUE_THEREISNOWAY);
		}
		return;
	}

	Container* tradeItemContainer = tradeItem->getContainer();
	if (tradeItemContainer) {
		for (const auto& it : tradeItems) {
			Item* item = it.first;
			if (tradeItem == item) {
				player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
				return;
			}

			if (tradeItemContainer->isHoldingItem(item)) {
				player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
				return;
			}

			Container* container = item->getContainer();
			if (container && container->isHoldingItem(tradeItem)) {
				player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
				return;
			}
		}
	} else {
		for (const auto& it : tradeItems) {
			Item* item = it.first;
			if (tradeItem == item) {
				player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
				return;
			}

			Container* container = item->getContainer();
			if (container && container->isHoldingItem(tradeItem)) {
				player->sendTextMessage(MESSAGE_INFO_DESCR, "This item is already being traded.");
				return;
			}
		}
	}

	Container* tradeContainer = tradeItem->getContainer();
	if (tradeContainer && tradeContainer->getItemHoldingCount() + 1 > 100) {
		player->sendTextMessage(MESSAGE_INFO_DESCR, "You can not trade more than 100 items.");
		return;
	}

	if (!g_events->eventPlayerOnTradeRequest(player, tradePartner, tradeItem)) {
		return;
	}

	internalStartTrade(player, tradePartner, tradeItem);
}
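// Trade state machine: TRADE_NONE -> TRADE_INITIATED (offering side) /
// TRADE_ACKNOWLEDGE (receiving side) -> TRADE_ACCEPT (both confirmed) ->
// TRADE_TRANSFER (items being exchanged). The offered item is
// reference-counted and tracked in tradeItems until the trade completes or is
// cancelled.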
bool Game::internalStartTrade(Player* player, Player* tradePartner, Item* tradeItem)
{
	if (player->tradeState != TRADE_NONE && !(player->tradeState == TRADE_ACKNOWLEDGE && player->tradePartner == tradePartner)) {
		player->sendCancelMessage(RETURNVALUE_YOUAREALREADYTRADING);
		return false;
	} else if (tradePartner->tradeState != TRADE_NONE && tradePartner->tradePartner != player) {
		player->sendCancelMessage(RETURNVALUE_THISPLAYERISALREADYTRADING);
		return false;
	}

	player->tradePartner = tradePartner;
	player->tradeItem = tradeItem;
	player->tradeState = TRADE_INITIATED;
	tradeItem->incrementReferenceCounter();
	tradeItems[tradeItem] = player->getID();

	player->sendTradeItemRequest(player->getName(), tradeItem, true);

	if (tradePartner->tradeState == TRADE_NONE) {
		std::ostringstream ss;
		ss << player->getName() << " wants to trade with you.";
		tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, ss.str());
		tradePartner->tradeState = TRADE_ACKNOWLEDGE;
		tradePartner->tradePartner = player;
	} else {
		Item* counterOfferItem = tradePartner->tradeItem;
		player->sendTradeItemRequest(tradePartner->getName(), counterOfferItem, false);
		tradePartner->sendTradeItemRequest(player->getName(), tradeItem, false);
	}

	return true;
}
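// Accepting is effectively two-phase: both items are first verified with the
// trailing test flag of internalAddItem/internalRemoveItem before the real
// moves are performed, so the exchange either transfers both items or neither.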
void Game::playerAcceptTrade(uint32_t playerId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	if (!(player->getTradeState() == TRADE_ACKNOWLEDGE || player->getTradeState() == TRADE_INITIATED)) {
		return;
	}

	Player* tradePartner = player->tradePartner;
	if (!tradePartner) {
		return;
	}

	if (!canThrowObjectTo(tradePartner->getPosition(), player->getPosition())) {
		player->sendCancelMessage(RETURNVALUE_CREATUREISNOTREACHABLE);
		return;
	}

	player->setTradeState(TRADE_ACCEPT);

	if (tradePartner->getTradeState() == TRADE_ACCEPT) {
		Item* tradeItem1 = player->tradeItem;
		Item* tradeItem2 = tradePartner->tradeItem;

		if (!g_events->eventPlayerOnTradeAccept(player, tradePartner, tradeItem1, tradeItem2)) {
			internalCloseTrade(player);
			return;
		}

		player->setTradeState(TRADE_TRANSFER);
		tradePartner->setTradeState(TRADE_TRANSFER);

		auto it = tradeItems.find(tradeItem1);
		if (it != tradeItems.end()) {
			ReleaseItem(it->first);
			tradeItems.erase(it);
		}

		it = tradeItems.find(tradeItem2);
		if (it != tradeItems.end()) {
			ReleaseItem(it->first);
			tradeItems.erase(it);
		}

		bool isSuccess = false;

		ReturnValue ret1 = internalAddItem(tradePartner, tradeItem1, INDEX_WHEREEVER, 0, true);
		ReturnValue ret2 = internalAddItem(player, tradeItem2, INDEX_WHEREEVER, 0, true);
		if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
			ret1 = internalRemoveItem(tradeItem1, tradeItem1->getItemCount(), true);
			ret2 = internalRemoveItem(tradeItem2, tradeItem2->getItemCount(), true);
			if (ret1 == RETURNVALUE_NOERROR && ret2 == RETURNVALUE_NOERROR) {
				Cylinder* cylinder1 = tradeItem1->getParent();
				Cylinder* cylinder2 = tradeItem2->getParent();

				uint32_t count1 = tradeItem1->getItemCount();
				uint32_t count2 = tradeItem2->getItemCount();

				ret1 = internalMoveItem(cylinder1, tradePartner, INDEX_WHEREEVER, tradeItem1, count1, nullptr, FLAG_IGNOREAUTOSTACK, nullptr, tradeItem2);
				if (ret1 == RETURNVALUE_NOERROR) {
					internalMoveItem(cylinder2, player, INDEX_WHEREEVER, tradeItem2, count2, nullptr, FLAG_IGNOREAUTOSTACK);

					tradeItem1->onTradeEvent(ON_TRADE_TRANSFER, tradePartner);
					tradeItem2->onTradeEvent(ON_TRADE_TRANSFER, player);

					isSuccess = true;
				}
			}
		}

		if (!isSuccess) {
			std::string errorDescription;

			if (tradePartner->tradeItem) {
				errorDescription = getTradeErrorDescription(ret1, tradeItem1);
				tradePartner->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
				tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
			}

			if (player->tradeItem) {
				errorDescription = getTradeErrorDescription(ret2, tradeItem2);
				player->sendTextMessage(MESSAGE_EVENT_ADVANCE, errorDescription);
				player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
			}
		}

		player->setTradeState(TRADE_NONE);
		player->tradeItem = nullptr;
		player->tradePartner = nullptr;
		player->sendTradeClose();

		tradePartner->setTradeState(TRADE_NONE);
		tradePartner->tradeItem = nullptr;
		tradePartner->tradePartner = nullptr;
		tradePartner->sendTradeClose();
	}
}

std::string Game::getTradeErrorDescription(ReturnValue ret, Item* item)
{
	if (item) {
		if (ret == RETURNVALUE_NOTENOUGHCAPACITY) {
			std::ostringstream ss;
			ss << "You do not have enough capacity to carry";

			if (item->isStackable() && item->getItemCount() > 1) {
				ss << " these objects.";
			} else {
				ss << " this object.";
			}

			ss << "\n " << item->getWeightDescription();
			return ss.str();
		} else if (ret == RETURNVALUE_NOTENOUGHROOM || ret == RETURNVALUE_CONTAINERNOTENOUGHROOM) {
			std::ostringstream ss;
			ss << "You do not have enough room to carry";

			if (item->isStackable() && item->getItemCount() > 1) {
				ss << " these objects.";
			} else {
				ss << " this object.";
			}

			return ss.str();
		}
	}
	return "Trade could not be completed.";
}

void Game::playerLookInTrade(uint32_t playerId, bool lookAtCounterOffer, uint8_t index)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Player* tradePartner = player->tradePartner;
	if (!tradePartner) {
		return;
	}

	Item* tradeItem;
	if (lookAtCounterOffer) {
		tradeItem = tradePartner->getTradeItem();
	} else {
		tradeItem = player->getTradeItem();
	}

	if (!tradeItem) {
		return;
	}

	const Position& playerPosition = player->getPosition();
	const Position& tradeItemPosition = tradeItem->getPosition();

	int32_t lookDistance = std::max<int32_t>(Position::getDistanceX(playerPosition, tradeItemPosition),
	                                         Position::getDistanceY(playerPosition, tradeItemPosition));
	if (index == 0) {
		g_events->eventPlayerOnLookInTrade(player, tradePartner, tradeItem, lookDistance);
		return;
	}

	Container* tradeContainer = tradeItem->getContainer();
	if (!tradeContainer) {
		return;
	}

	std::vector<const Container*> containers {tradeContainer};
	size_t i = 0;
	while (i < containers.size()) {
		const Container* container = containers[i++];
		for (Item* item : container->getItemList()) {
			Container* tmpContainer = item->getContainer();
			if (tmpContainer) {
				containers.push_back(tmpContainer);
			}

			if (--index == 0) {
				g_events->eventPlayerOnLookInTrade(player, tradePartner, item, lookDistance);
				return;
			}
		}
	}
}

void Game::playerCloseTrade(uint32_t playerId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	internalCloseTrade(player);
}

void Game::internalCloseTrade(Player* player)
{
	Player* tradePartner = player->tradePartner;
	if ((tradePartner && tradePartner->getTradeState() == TRADE_TRANSFER) || player->getTradeState() == TRADE_TRANSFER) {
		return;
	}

	if (player->getTradeItem()) {
		auto it = tradeItems.find(player->getTradeItem());
		if (it != tradeItems.end()) {
			ReleaseItem(it->first);
			tradeItems.erase(it);
		}

		player->tradeItem->onTradeEvent(ON_TRADE_CANCEL, player);
		player->tradeItem = nullptr;
	}

	player->setTradeState(TRADE_NONE);
	player->tradePartner = nullptr;

	player->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
	player->sendTradeClose();

	if (tradePartner) {
		if (tradePartner->getTradeItem()) {
			auto it = tradeItems.find(tradePartner->getTradeItem());
			if (it != tradeItems.end()) {
				ReleaseItem(it->first);
				tradeItems.erase(it);
			}

			tradePartner->tradeItem->onTradeEvent(ON_TRADE_CANCEL, tradePartner);
			tradePartner->tradeItem = nullptr;
		}

		tradePartner->setTradeState(TRADE_NONE);
		tradePartner->tradePartner = nullptr;

		tradePartner->sendTextMessage(MESSAGE_STATUS_SMALL, "Trade cancelled.");
		tradePartner->sendTradeClose();
	}
}
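// Shop requests arrive with client sprite ids and, for splashes and fluid
// containers, client-side fluid numbers; both are translated
// (getItemIdByClientId, clientFluidToServer) before reaching the NPC's
// onPlayerTrade callback. Amounts are capped at 100 per request.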
void Game::playerPurchaseItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount,
                              bool ignoreCap/* = false*/, bool inBackpacks/* = false*/)
{
	if (amount == 0 || amount > 100) {
		return;
	}

	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	int32_t onBuy, onSell;

	Npc* merchant = player->getShopOwner(onBuy, onSell);
	if (!merchant) {
		return;
	}

	const ItemType& it = Item::items.getItemIdByClientId(spriteId);
	if (it.id == 0) {
		return;
	}

	uint8_t subType;
	if (it.isSplash() || it.isFluidContainer()) {
		subType = clientFluidToServer(count);
	} else {
		subType = count;
	}

	if (!player->hasShopItemForSale(it.id, subType)) {
		return;
	}

	merchant->onPlayerTrade(player, onBuy, it.id, subType, amount, ignoreCap, inBackpacks);
}

void Game::playerSellItem(uint32_t playerId, uint16_t spriteId, uint8_t count, uint8_t amount, bool ignoreEquipped)
{
	if (amount == 0 || amount > 100) {
		return;
	}

	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	int32_t onBuy, onSell;

	Npc* merchant = player->getShopOwner(onBuy, onSell);
	if (!merchant) {
		return;
	}

	const ItemType& it = Item::items.getItemIdByClientId(spriteId);
	if (it.id == 0) {
		return;
	}

	uint8_t subType;
	if (it.isSplash() || it.isFluidContainer()) {
		subType = clientFluidToServer(count);
	} else {
		subType = count;
	}

	merchant->onPlayerTrade(player, onSell, it.id, subType, amount, ignoreEquipped);
}

void Game::playerCloseShop(uint32_t playerId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->closeShopWindow();
}

void Game::playerLookInShop(uint32_t playerId, uint16_t spriteId, uint8_t count)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	int32_t onBuy, onSell;

	Npc* merchant = player->getShopOwner(onBuy, onSell);
	if (!merchant) {
		return;
	}

	const ItemType& it = Item::items.getItemIdByClientId(spriteId);
	if (it.id == 0) {
		return;
	}

	int32_t subType;
	if (it.isFluidContainer() || it.isSplash()) {
		subType = clientFluidToServer(count);
	} else {
		subType = count;
	}

	if (!player->hasShopItemForSale(it.id, subType)) {
		return;
	}

	if (!g_events->eventPlayerOnLookInShop(player, &it, subType)) {
		return;
	}

	std::ostringstream ss;
	ss << "You see " << Item::getDescription(it, 1, nullptr, subType);
	player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str());
}
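// lookDistance is the Chebyshev distance to the inspected thing, padded by 15
// when it lies on a different floor so scripts can distinguish cross-floor
// looks; -1 means the player is looking at itself.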
void Game::playerLookAt(uint32_t playerId, const Position& pos, uint8_t stackPos)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Thing* thing = internalGetThing(player, pos, stackPos, 0, STACKPOS_LOOK);
	if (!thing) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	Position thingPos = thing->getPosition();
	if (!player->canSee(thingPos)) {
		player->sendCancelMessage(RETURNVALUE_NOTPOSSIBLE);
		return;
	}

	Position playerPos = player->getPosition();

	int32_t lookDistance;
	if (thing != player) {
		lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, thingPos), Position::getDistanceY(playerPos, thingPos));
		if (playerPos.z != thingPos.z) {
			lookDistance += 15;
		}
	} else {
		lookDistance = -1;
	}

	g_events->eventPlayerOnLook(player, pos, thing, stackPos, lookDistance);
}

void Game::playerLookInBattleList(uint32_t playerId, uint32_t creatureId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Creature* creature = getCreatureByID(creatureId);
	if (!creature) {
		return;
	}

	if (!player->canSeeCreature(creature)) {
		return;
	}

	const Position& creaturePos = creature->getPosition();
	if (!player->canSee(creaturePos)) {
		return;
	}

	int32_t lookDistance;
	if (creature != player) {
		const Position& playerPos = player->getPosition();
		lookDistance = std::max<int32_t>(Position::getDistanceX(playerPos, creaturePos), Position::getDistanceY(playerPos, creaturePos));
		if (playerPos.z != creaturePos.z) {
			lookDistance += 15;
		}
	} else {
		lookDistance = -1;
	}

	g_events->eventPlayerOnLookInBattleList(player, creature, lookDistance);
}

void Game::playerCancelAttackAndFollow(uint32_t playerId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	playerSetAttackedCreature(playerId, 0);
	playerFollowCreature(playerId, 0);
	player->stopWalk();
}

void Game::playerSetAttackedCreature(uint32_t playerId, uint32_t creatureId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	if (player->getAttackedCreature() && creatureId == 0) {
		player->setAttackedCreature(nullptr);
		player->sendCancelTarget();
		return;
	}

	Creature* attackCreature = getCreatureByID(creatureId);
	if (!attackCreature) {
		player->setAttackedCreature(nullptr);
		player->sendCancelTarget();
		return;
	}

	ReturnValue ret = Combat::canTargetCreature(player, attackCreature);
	if (ret != RETURNVALUE_NOERROR) {
		player->sendCancelMessage(ret);
		player->sendCancelTarget();
		player->setAttackedCreature(nullptr);
		return;
	}

	player->setAttackedCreature(attackCreature);
	g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
}

void Game::playerFollowCreature(uint32_t playerId, uint32_t creatureId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->setAttackedCreature(nullptr);
	g_dispatcher.addTask(createTask(std::bind(&Game::updateCreatureWalk, this, player->getID())));
	player->setFollowCreature(getCreatureByID(creatureId));
}

void Game::playerSetFightModes(uint32_t playerId, fightMode_t fightMode, bool chaseMode, bool secureMode)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->setFightMode(fightMode);
	player->setChaseMode(chaseMode);
	player->setSecureMode(secureMode);
}

void Game::playerRequestAddVip(uint32_t playerId, const std::string& name)
{
	if (name.length() > 20) {
		return;
	}

	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Player* vipPlayer = getPlayerByName(name);
	if (!vipPlayer) {
		uint32_t guid;
		bool specialVip;
		std::string formattedName = name;
		if (!IOLoginData::getGuidByNameEx(guid, specialVip, formattedName)) {
			player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name does not exist.");
			return;
		}

		if (specialVip && !player->hasFlag(PlayerFlag_SpecialVIP)) {
			player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
			return;
		}

		player->addVIP(guid, formattedName, VIPSTATUS_OFFLINE);
	} else {
		if (vipPlayer->hasFlag(PlayerFlag_SpecialVIP) && !player->hasFlag(PlayerFlag_SpecialVIP)) {
			player->sendTextMessage(MESSAGE_STATUS_SMALL, "You can not add this player.");
			return;
		}

		if (!vipPlayer->isInGhostMode() || player->isAccessPlayer()) {
			player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_ONLINE);
		} else {
			player->addVIP(vipPlayer->getGUID(), vipPlayer->getName(), VIPSTATUS_OFFLINE);
		}
	}
}

void Game::playerRequestRemoveVip(uint32_t playerId, uint32_t guid)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->removeVIP(guid);
}

void Game::playerRequestEditVip(uint32_t playerId, uint32_t guid, const std::string& description, uint32_t icon, bool notify)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->editVIP(guid, description, icon, notify);
}

void Game::playerTurn(uint32_t playerId, Direction dir)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	if (!g_events->eventPlayerOnTurn(player, dir)) {
		return;
	}

	player->resetIdleTime();
	internalCreatureTurn(player, dir);
}

void Game::playerRequestOutfit(uint32_t playerId)
{
	if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
		return;
	}

	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->sendOutfitWindow();
}

void Game::playerToggleMount(uint32_t playerId, bool mount)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->toggleMount(mount);
}
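// Switching mounts while mounted adjusts the player's speed by the difference
// between the new and the previous mount's speed bonus; picking a mount while
// on foot only stores it as the current mount and clears lookMount again.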
void Game::playerChangeOutfit(uint32_t playerId, Outfit_t outfit)
{
	if (!g_config.getBoolean(ConfigManager::ALLOW_CHANGEOUTFIT)) {
		return;
	}

	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	const Outfit* playerOutfit = Outfits::getInstance().getOutfitByLookType(player->getSex(), outfit.lookType);
	if (!playerOutfit) {
		outfit.lookMount = 0;
	}

	if (outfit.lookMount != 0) {
		Mount* mount = mounts.getMountByClientID(outfit.lookMount);
		if (!mount) {
			return;
		}

		if (!player->hasMount(mount)) {
			return;
		}

		if (player->isMounted()) {
			Mount* prevMount = mounts.getMountByID(player->getCurrentMount());
			if (prevMount) {
				changeSpeed(player, mount->speed - prevMount->speed);
			}

			player->setCurrentMount(mount->id);
		} else {
			player->setCurrentMount(mount->id);
			outfit.lookMount = 0;
		}
	} else if (player->isMounted()) {
		player->dismount();
	}

	if (player->canWear(outfit.lookType, outfit.lookAddons)) {
		player->defaultOutfit = outfit;

		if (player->hasCondition(CONDITION_OUTFIT)) {
			return;
		}

		internalCreatureChangeOutfit(player, outfit);
	}
}

void Game::playerShowQuestLog(uint32_t playerId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->sendQuestLog();
}

void Game::playerShowQuestLine(uint32_t playerId, uint16_t questId)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	Quest* quest = quests.getQuestByID(questId);
	if (!quest) {
		return;
	}

	player->sendQuestLine(quest);
}

void Game::playerSay(uint32_t playerId, uint16_t channelId, SpeakClasses type,
                     const std::string& receiver, const std::string& text)
{
	Player* player = getPlayerByID(playerId);
	if (!player) {
		return;
	}

	player->resetIdleTime();

	if (playerSaySpell(player, type, text)) {
		return;
	}

	uint32_t muteTime = player->isMuted();
	if (muteTime > 0) {
		std::ostringstream ss;
		ss << "You are still muted for " << muteTime << " seconds.";
		player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
		return;
	}

	if (!text.empty() && text.front() == '/' && player->isAccessPlayer()) {
		return;
	}

	if (type != TALKTYPE_PRIVATE_PN) {
		player->removeMessageBuffer();
	}

	switch (type) {
		case TALKTYPE_SAY:
			internalCreatureSay(player, TALKTYPE_SAY, text, false);
			break;

		case TALKTYPE_WHISPER:
			playerWhisper(player, text);
			break;

		case TALKTYPE_YELL:
			playerYell(player, text);
			break;

		case TALKTYPE_PRIVATE_TO:
		case TALKTYPE_PRIVATE_RED_TO:
			playerSpeakTo(player, type, receiver, text);
			break;

		case TALKTYPE_CHANNEL_O:
		case TALKTYPE_CHANNEL_Y:
		case TALKTYPE_CHANNEL_R1:
			g_chat->talkToChannel(*player, type, text, channelId);
			break;

		case TALKTYPE_PRIVATE_PN:
			playerSpeakToNpc(player, text);
			break;

		case TALKTYPE_BROADCAST:
			playerBroadcastMessage(player, text);
			break;

		default:
			break;
	}
}

bool Game::playerSaySpell(Player* player, SpeakClasses type, const std::string& text)
{
	std::string words = text;

	TalkActionResult_t result = g_talkActions->playerSaySpell(player, type, words);
	if (result == TALKACTION_BREAK) {
		return true;
	}

	result = g_spells->playerSaySpell(player, words);
	if (result == TALKACTION_BREAK) {
		if (!g_config.getBoolean(ConfigManager::EMOTE_SPELLS)) {
			return internalCreatureSay(player, TALKTYPE_SAY, words, false);
		} else {
			return internalCreatureSay(player, TALKTYPE_MONSTER_SAY, words, false);
		}
	} else if (result == TALKACTION_FAILED) {
		return true;
	}

	return false;
}
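// Whispers are delivered verbatim only to spectators within one tile of the
// speaker; everyone else inside the normal view range merely sees "pspsps".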
void Game::playerWhisper(Player* player, const std::string& text)
{
	SpectatorHashSet spectators;
	map.getSpectators(spectators, player->getPosition(), false, false,
	                  Map::maxClientViewportX, Map::maxClientViewportX,
	                  Map::maxClientViewportY, Map::maxClientViewportY);

	//send to client
	for (Creature* spectator : spectators) {
		if (Player* spectatorPlayer = spectator->getPlayer()) {
			if (!Position::areInRange<1, 1>(player->getPosition(), spectatorPlayer->getPosition())) {
				spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, "pspsps");
			} else {
				spectatorPlayer->sendCreatureSay(player, TALKTYPE_WHISPER, text);
			}
		}
	}

	//event method
	for (Creature* spectator : spectators) {
		spectator->onCreatureSay(player, TALKTYPE_WHISPER, text);
	}
}

bool Game::playerYell(Player* player, const std::string& text)
{
	if (player->getLevel() == 1) {
		player->sendTextMessage(MESSAGE_STATUS_SMALL, "You may not yell as long as you are on level 1.");
		return false;
	}

	if (player->hasCondition(CONDITION_YELLTICKS)) {
		player->sendCancelMessage(RETURNVALUE_YOUAREEXHAUSTED);
		return false;
	}

	if (player->getAccountType() < ACCOUNT_TYPE_GAMEMASTER) {
		Condition* condition = Condition::createCondition(CONDITIONID_DEFAULT, CONDITION_YELLTICKS, 30000, 0);
		player->addCondition(condition);
	}

	internalCreatureSay(player, TALKTYPE_YELL, asUpperCaseString(text), false);
	return true;
}

bool Game::playerSpeakTo(Player* player, SpeakClasses type, const std::string& receiver, const std::string& text)
{
	Player* toPlayer = getPlayerByName(receiver);
	if (!toPlayer) {
		player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
		return false;
	}

	if (type == TALKTYPE_PRIVATE_RED_TO && (player->hasFlag(PlayerFlag_CanTalkRedPrivate) || player->getAccountType() >= ACCOUNT_TYPE_GAMEMASTER)) {
		type = TALKTYPE_PRIVATE_RED_FROM;
	} else {
		type = TALKTYPE_PRIVATE_FROM;
	}

	toPlayer->sendPrivateMessage(player, type, text);
	toPlayer->onCreatureSay(player, type, text);

	if (toPlayer->isInGhostMode() && !player->isAccessPlayer()) {
		player->sendTextMessage(MESSAGE_STATUS_SMALL, "A player with this name is not online.");
	} else {
		std::ostringstream ss;
		ss << "Message sent to " << toPlayer->getName() << '.';
		player->sendTextMessage(MESSAGE_STATUS_SMALL, ss.str());
	}
	return true;
}

void Game::playerSpeakToNpc(Player* player, const std::string& text)
{
	SpectatorHashSet spectators;
	map.getSpectators(spectators, player->getPosition());
	for (Creature* spectator : spectators) {
		if (spectator->getNpc()) {
			spectator->onCreatureSay(player, TALKTYPE_PRIVATE_PN, text);
		}
	}
}

//--
bool Game::canThrowObjectTo(const Position& fromPos, const Position& toPos, bool checkLineOfSight /*= true*/,
                            int32_t rangex /*= Map::maxClientViewportX*/, int32_t rangey /*= Map::maxClientViewportY*/) const
{
	return map.canThrowObjectTo(fromPos, toPos, checkLineOfSight, rangex, rangey);
}

bool Game::isSightClear(const Position& fromPos, const Position& toPos, bool floorCheck) const
{
	return map.isSightClear(fromPos, toPos, floorCheck);
}

bool Game::internalCreatureTurn(Creature* creature, Direction dir)
{
	if (creature->getDirection() == dir) {
		return false;
	}

	creature->setDirection(dir);

	//send to client
	SpectatorHashSet spectators;
	map.getSpectators(spectators, creature->getPosition(), true, true);
	for (Creature* spectator : spectators) {
		spectator->getPlayer()->sendCreatureTurn(creature);
	}
	return true;
}

bool Game::internalCreatureSay(Creature* creature, SpeakClasses type, const std::string& text,
                               bool ghostMode, SpectatorHashSet* spectatorsPtr/* = nullptr*/, const Position* pos/* = nullptr*/)
{
	if (text.empty()) {
		return false;
	}

	if (!pos) {
		pos = &creature->getPosition();
	}

	SpectatorHashSet spectators;

	if (!spectatorsPtr || spectatorsPtr->empty()) {
		// This somewhat complex construct ensures that the cached SpectatorHashSet
		// is used if available and if it can be used, else a local vector is
		// used (hopefully the compiler will optimize away the construction of
		// the temporary when it's not used).
		if (type != TALKTYPE_YELL && type != TALKTYPE_MONSTER_YELL) {
			map.getSpectators(spectators, *pos, false, false,
			                  Map::maxClientViewportX, Map::maxClientViewportX,
			                  Map::maxClientViewportY, Map::maxClientViewportY);
		} else {
			map.getSpectators(spectators, *pos, true, false, 18, 18, 14, 14);
		}
	} else {
		spectators = (*spectatorsPtr);
	}

	//send to client
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			if (!ghostMode || tmpPlayer->canSeeCreature(creature)) {
				tmpPlayer->sendCreatureSay(creature, type, text, pos);
			}
		}
	}

	//event method
	for (Creature* spectator : spectators) {
		spectator->onCreatureSay(creature, type, text);
	}
	return true;
}

void Game::checkCreatureWalk(uint32_t creatureId)
{
	Creature* creature = getCreatureByID(creatureId);
	if (creature && creature->getHealth() > 0) {
		creature->onWalk();
		cleanup();
	}
}

void Game::updateCreatureWalk(uint32_t creatureId)
{
	Creature* creature = getCreatureByID(creatureId);
	if (creature && creature->getHealth() > 0) {
		creature->goToFollowCreature();
	}
}

void Game::checkCreatureAttack(uint32_t creatureId)
{
	Creature* creature = getCreatureByID(creatureId);
	if (creature && creature->getHealth() > 0) {
		creature->onAttacking(0);
	}
}
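// Creature thinking is spread across EVENT_CREATURECOUNT round-robin lists,
// one of which is visited per EVENT_CHECK_CREATURE_INTERVAL, so onThink work
// is amortized rather than run for every creature in a single tick. Entries
// are reference-counted and removed lazily once creatureCheck is cleared.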
void Game::addCreatureCheck(Creature* creature)
{
	creature->creatureCheck = true;

	if (creature->inCheckCreaturesVector) {
		// already in a vector
		return;
	}

	creature->inCheckCreaturesVector = true;
	checkCreatureLists[uniform_random(0, EVENT_CREATURECOUNT - 1)].push_back(creature);
	creature->incrementReferenceCounter();
}

void Game::removeCreatureCheck(Creature* creature)
{
	if (creature->inCheckCreaturesVector) {
		creature->creatureCheck = false;
	}
}

void Game::checkCreatures(size_t index)
{
	g_scheduler.addEvent(createSchedulerTask(EVENT_CHECK_CREATURE_INTERVAL, std::bind(&Game::checkCreatures, this, (index + 1) % EVENT_CREATURECOUNT)));

	auto& checkCreatureList = checkCreatureLists[index];
	auto it = checkCreatureList.begin(), end = checkCreatureList.end();
	while (it != end) {
		Creature* creature = *it;
		if (creature->creatureCheck) {
			if (creature->getHealth() > 0) {
				creature->onThink(EVENT_CREATURE_THINK_INTERVAL);
				creature->onAttacking(EVENT_CREATURE_THINK_INTERVAL);
				creature->executeConditions(EVENT_CREATURE_THINK_INTERVAL);
			} else {
				creature->onDeath();
			}
			++it;
		} else {
			creature->inCheckCreaturesVector = false;
			it = checkCreatureList.erase(it);
			ReleaseCreature(creature);
		}
	}

	cleanup();
}

void Game::changeSpeed(Creature* creature, int32_t varSpeedDelta)
{
	int32_t varSpeed = creature->getSpeed() - creature->getBaseSpeed();
	varSpeed += varSpeedDelta;

	creature->setSpeed(varSpeed);

	//send to clients
	SpectatorHashSet spectators;
	map.getSpectators(spectators, creature->getPosition(), false, true);
	for (Creature* spectator : spectators) {
		spectator->getPlayer()->sendChangeSpeed(creature, creature->getStepSpeed());
	}
}

void Game::internalCreatureChangeOutfit(Creature* creature, const Outfit_t& outfit)
{
	if (!g_events->eventCreatureOnChangeOutfit(creature, outfit)) {
		return;
	}

	creature->setCurrentOutfit(outfit);

	if (creature->isInvisible()) {
		return;
	}

	//send to clients
	SpectatorHashSet spectators;
	map.getSpectators(spectators, creature->getPosition(), true, true);
	for (Creature* spectator : spectators) {
		spectator->getPlayer()->sendCreatureChangeOutfit(creature, outfit);
	}
}

void Game::internalCreatureChangeVisible(Creature* creature, bool visible)
{
	//send to clients
	SpectatorHashSet spectators;
	map.getSpectators(spectators, creature->getPosition(), true, true);
	for (Creature* spectator : spectators) {
		spectator->getPlayer()->sendCreatureChangeVisible(creature, visible);
	}
}

void Game::changeLight(const Creature* creature)
{
	//send to clients
	SpectatorHashSet spectators;
	map.getSpectators(spectators, creature->getPosition(), true, true);
	for (Creature* spectator : spectators) {
		spectator->getPlayer()->sendCreatureLight(creature);
	}
}
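// combatBlockHit lets the target's defense, armor and immunities shrink the
// primary and secondary damage components in place (values are negated around
// blockHit, which expects positive damage) and shows the matching block
// effect; it returns true only if both components were fully blocked.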
bool Game::combatBlockHit(CombatDamage& damage, Creature* attacker, Creature* target, bool checkDefense, bool checkArmor, bool field)
{
	if (damage.primary.type == COMBAT_NONE && damage.secondary.type == COMBAT_NONE) {
		return true;
	}

	if (target->getPlayer() && target->isInGhostMode()) {
		return true;
	}

	if (damage.primary.value > 0) {
		return false;
	}

	static const auto sendBlockEffect = [this](BlockType_t blockType, CombatType_t combatType, const Position& targetPos) {
		if (blockType == BLOCK_DEFENSE) {
			addMagicEffect(targetPos, CONST_ME_POFF);
		} else if (blockType == BLOCK_ARMOR) {
			addMagicEffect(targetPos, CONST_ME_BLOCKHIT);
		} else if (blockType == BLOCK_IMMUNITY) {
			uint8_t hitEffect = 0;
			switch (combatType) {
				case COMBAT_UNDEFINEDDAMAGE: {
					return;
				}
				case COMBAT_ENERGYDAMAGE:
				case COMBAT_FIREDAMAGE:
				case COMBAT_PHYSICALDAMAGE:
				case COMBAT_ICEDAMAGE:
				case COMBAT_DEATHDAMAGE: {
					hitEffect = CONST_ME_BLOCKHIT;
					break;
				}
				case COMBAT_EARTHDAMAGE: {
					hitEffect = CONST_ME_GREEN_RINGS;
					break;
				}
				case COMBAT_HOLYDAMAGE: {
					hitEffect = CONST_ME_HOLYDAMAGE;
					break;
				}
				default: {
					hitEffect = CONST_ME_POFF;
					break;
				}
			}
			addMagicEffect(targetPos, hitEffect);
		}
	};

	BlockType_t primaryBlockType, secondaryBlockType;
	if (damage.primary.type != COMBAT_NONE) {
		damage.primary.value = -damage.primary.value;
		primaryBlockType = target->blockHit(attacker, damage.primary.type, damage.primary.value, checkDefense, checkArmor, field);

		damage.primary.value = -damage.primary.value;
		sendBlockEffect(primaryBlockType, damage.primary.type, target->getPosition());
	} else {
		primaryBlockType = BLOCK_NONE;
	}

	if (damage.secondary.type != COMBAT_NONE) {
		damage.secondary.value = -damage.secondary.value;
		secondaryBlockType = target->blockHit(attacker, damage.secondary.type, damage.secondary.value, false, false, field);

		damage.secondary.value = -damage.secondary.value;
		sendBlockEffect(secondaryBlockType, damage.secondary.type, target->getPosition());
	} else {
		secondaryBlockType = BLOCK_NONE;
	}
	return (primaryBlockType != BLOCK_NONE) && (secondaryBlockType != BLOCK_NONE);
}
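// Maps a combat type - and, for physical hits, the target's race - to the
// text color and magic effect shown to clients; physical hits may also drop a
// small blood or slime splash on the target's tile, which starts decaying
// immediately.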
void Game::combatGetTypeInfo(CombatType_t combatType, Creature* target, TextColor_t& color, uint8_t& effect)
{
	switch (combatType) {
		case COMBAT_PHYSICALDAMAGE: {
			Item* splash = nullptr;
			switch (target->getRace()) {
				case RACE_VENOM:
					color = TEXTCOLOR_LIGHTGREEN;
					effect = CONST_ME_HITBYPOISON;
					splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_SLIME);
					break;
				case RACE_BLOOD:
					color = TEXTCOLOR_RED;
					effect = CONST_ME_DRAWBLOOD;
					if (const Tile* tile = target->getTile()) {
						if (!tile->hasFlag(TILESTATE_PROTECTIONZONE)) {
							splash = Item::CreateItem(ITEM_SMALLSPLASH, FLUID_BLOOD);
						}
					}
					break;
				case RACE_UNDEAD:
					color = TEXTCOLOR_LIGHTGREY;
					effect = CONST_ME_HITAREA;
					break;
				case RACE_FIRE:
					color = TEXTCOLOR_ORANGE;
					effect = CONST_ME_DRAWBLOOD;
					break;
				case RACE_ENERGY:
					color = TEXTCOLOR_ELECTRICPURPLE;
					effect = CONST_ME_ENERGYHIT;
					break;
				default:
					color = TEXTCOLOR_NONE;
					effect = CONST_ME_NONE;
					break;
			}

			if (splash) {
				internalAddItem(target->getTile(), splash, INDEX_WHEREEVER, FLAG_NOLIMIT);
				startDecay(splash);
			}
			break;
		}

		case COMBAT_ENERGYDAMAGE: {
			color = TEXTCOLOR_ELECTRICPURPLE;
			effect = CONST_ME_ENERGYHIT;
			break;
		}

		case COMBAT_EARTHDAMAGE: {
			color = TEXTCOLOR_LIGHTGREEN;
			effect = CONST_ME_GREEN_RINGS;
			break;
		}

		case COMBAT_DROWNDAMAGE: {
			color = TEXTCOLOR_LIGHTBLUE;
			effect = CONST_ME_LOSEENERGY;
			break;
		}
		case COMBAT_FIREDAMAGE: {
			color = TEXTCOLOR_ORANGE;
			effect = CONST_ME_HITBYFIRE;
			break;
		}
		case COMBAT_ICEDAMAGE: {
			color = TEXTCOLOR_SKYBLUE;
			effect = CONST_ME_ICEATTACK;
			break;
		}
		case COMBAT_HOLYDAMAGE: {
			color = TEXTCOLOR_YELLOW;
			effect = CONST_ME_HOLYDAMAGE;
			break;
		}
		case COMBAT_DEATHDAMAGE: {
			color = TEXTCOLOR_DARKRED;
			effect = CONST_ME_SMALLCLOUDS;
			break;
		}
		case COMBAT_LIFEDRAIN: {
			color = TEXTCOLOR_RED;
			effect = CONST_ME_MAGIC_RED;
			break;
		}
		default: {
			color = TEXTCOLOR_NONE;
			effect = CONST_ME_NONE;
			break;
		}
	}
}

bool Game::combatChangeHealth(Creature* attacker, Creature* target, CombatDamage& damage)
{
	const Position& targetPos = target->getPosition();
	if (damage.primary.value > 0) {
		if (target->getHealth() <= 0) {
			return false;
		}

		Player* attackerPlayer;
		if (attacker) {
			attackerPlayer = attacker->getPlayer();
		} else {
			attackerPlayer = nullptr;
		}

		Player* targetPlayer = target->getPlayer();
		if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
			return false;
		}

		if (damage.origin != ORIGIN_NONE) {
			const auto& events = target->getCreatureEvents(CREATURE_EVENT_HEALTHCHANGE);
			if (!events.empty()) {
				for (CreatureEvent* creatureEvent : events) {
					creatureEvent->executeHealthChange(target, attacker, damage);
				}
				damage.origin = ORIGIN_NONE;
				return combatChangeHealth(attacker, target, damage);
			}
		}

		int32_t realHealthChange = target->getHealth();
		target->gainHealth(attacker, damage.primary.value);
		realHealthChange = target->getHealth() - realHealthChange;

		if (realHealthChange > 0 && !target->isInGhostMode()) {
			std::stringstream ss;

			ss << realHealthChange << (realHealthChange != 1 ? " hitpoints." : " hitpoint.");
			std::string damageString = ss.str();

			std::string spectatorMessage;

			TextMessage message;
			message.position = targetPos;
			message.primary.value = realHealthChange;
			message.primary.color = TEXTCOLOR_PASTELRED;

			SpectatorHashSet spectators;
			map.getSpectators(spectators, targetPos, false, true);
			for (Creature* spectator : spectators) {
				Player* tmpPlayer = spectator->getPlayer();
				if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
					ss.str({});
					ss << "You heal " << target->getNameDescription() << " for " << damageString;
					message.type = MESSAGE_HEALED;
					message.text = ss.str();
				} else if (tmpPlayer == targetPlayer) {
					ss.str({});
					if (!attacker) {
						ss << "You were healed";
					} else if (targetPlayer == attackerPlayer) {
						ss << "You healed yourself";
					} else {
						ss << "You were healed by " << attacker->getNameDescription();
					}
					ss << " for " << damageString;
					message.type = MESSAGE_HEALED;
					message.text = ss.str();
				} else {
					if (spectatorMessage.empty()) {
						ss.str({});
						if (!attacker) {
							ss << ucfirst(target->getNameDescription()) << " was healed";
						} else {
							ss << ucfirst(attacker->getNameDescription()) << " healed ";
							if (attacker == target) {
								ss << (targetPlayer ? (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "herself" : "himself") : "itself");
							} else {
								ss << target->getNameDescription();
							}
						}
						ss << " for " << damageString;
						spectatorMessage = ss.str();
					}
					message.type = MESSAGE_HEALED_OTHERS;
					message.text = spectatorMessage;
				}
				tmpPlayer->sendTextMessage(message);
			}
		}
	} else {
		if (!target->isAttackable()) {
			if (!target->isInGhostMode()) {
				addMagicEffect(targetPos, CONST_ME_POFF);
			}
			return true;
		}

		Player* attackerPlayer;
		if (attacker) {
			attackerPlayer = attacker->getPlayer();
		} else {
			attackerPlayer = nullptr;
		}

		Player* targetPlayer = target->getPlayer();
		if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
			return false;
		}

		damage.primary.value = std::abs(damage.primary.value);
		damage.secondary.value = std::abs(damage.secondary.value);

		int32_t healthChange = damage.primary.value + damage.secondary.value;
		if (healthChange == 0) {
			return true;
		}
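		// Special skills roll independently per hit: hit point and mana leech
		// heal the attacker based on the damage dealt, and a critical hit
		// increases healthChange before mana shield and health are applied.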
"herself" : "himself") : "itself"); } else { ss << target->getNameDescription(); } } ss << " for " << damageString; spectatorMessage = ss.str(); } message.type = MESSAGE_HEALED_OTHERS; message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } } else { if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return true; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } Player* targetPlayer = target->getPlayer(); if (attackerPlayer && targetPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } damage.primary.value = std::abs(damage.primary.value); damage.secondary.value = std::abs(damage.secondary.value); int32_t healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } if (attackerPlayer) { uint16_t chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_HITPOINTSLEECHCHANCE); if (chance != 0 && uniform_random(1, 100) <= chance) { CombatDamage lifeLeech; lifeLeech.primary.value = std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_HITPOINTSLEECHAMOUNT) / 100.)); g_game.combatChangeHealth(nullptr, attackerPlayer, lifeLeech); } chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_MANAPOINTSLEECHCHANCE); if (chance != 0 && uniform_random(1, 100) <= chance) { CombatDamage manaLeech; manaLeech.primary.value = std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_MANAPOINTSLEECHAMOUNT) / 100.)); g_game.combatChangeMana(nullptr, attackerPlayer, manaLeech); } chance = attackerPlayer->getSpecialSkill(SPECIALSKILL_CRITICALHITCHANCE); if (chance != 0 && uniform_random(1, 100) <= chance) { healthChange += std::round(healthChange * (attackerPlayer->getSpecialSkill(SPECIALSKILL_CRITICALHITAMOUNT) / 100.)); g_game.addMagicEffect(target->getPosition(), CONST_ME_CRITICAL_DAMAGE); } } TextMessage message; message.position = targetPos; SpectatorHashSet spectators; if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) { int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); if (manaDamage != 0) { if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } healthChange = damage.primary.value + damage.secondary.value; if (healthChange == 0) { return true; } manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange); } } targetPlayer->drainMana(attacker, manaDamage); map.getSpectators(spectators, targetPos, true, true); addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY); std::stringstream ss; std::string damageString = std::to_string(manaDamage); std::string spectatorMessage; message.primary.value = manaDamage; message.primary.color = TEXTCOLOR_BLUE; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString + " mana due to your attack."; message.type = MESSAGE_DAMAGE_DEALT; message.text = ss.str(); } else if (tmpPlayer == targetPlayer) { ss.str({}); ss << "You lose " << damageString << " mana"; if 
		if (targetPlayer && target->hasCondition(CONDITION_MANASHIELD) && damage.primary.type != COMBAT_UNDEFINEDDAMAGE) {
			int32_t manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
			if (manaDamage != 0) {
				if (damage.origin != ORIGIN_NONE) {
					const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
					if (!events.empty()) {
						for (CreatureEvent* creatureEvent : events) {
							creatureEvent->executeManaChange(target, attacker, damage);
						}
						healthChange = damage.primary.value + damage.secondary.value;
						if (healthChange == 0) {
							return true;
						}
						manaDamage = std::min<int32_t>(targetPlayer->getMana(), healthChange);
					}
				}

				targetPlayer->drainMana(attacker, manaDamage);
				map.getSpectators(spectators, targetPos, true, true);
				addMagicEffect(spectators, targetPos, CONST_ME_LOSEENERGY);

				std::stringstream ss;

				std::string damageString = std::to_string(manaDamage);

				std::string spectatorMessage;

				message.primary.value = manaDamage;
				message.primary.color = TEXTCOLOR_BLUE;

				for (Creature* spectator : spectators) {
					Player* tmpPlayer = spectator->getPlayer();
					if (tmpPlayer->getPosition().z != targetPos.z) {
						continue;
					}

					if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
						ss.str({});
						ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
						message.type = MESSAGE_DAMAGE_DEALT;
						message.text = ss.str();
					} else if (tmpPlayer == targetPlayer) {
						ss.str({});
						ss << "You lose " << damageString << " mana";
						if (!attacker) {
							ss << '.';
						} else if (targetPlayer == attackerPlayer) {
							ss << " due to your own attack.";
						} else {
							ss << " due to an attack by " << attacker->getNameDescription() << '.';
						}
						message.type = MESSAGE_DAMAGE_RECEIVED;
						message.text = ss.str();
					} else {
						if (spectatorMessage.empty()) {
							ss.str({});
							ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
							if (attacker) {
								ss << " due to ";
								if (attacker == target) {
									ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
								} else {
									ss << "an attack by " << attacker->getNameDescription();
								}
							}
							ss << '.';
							spectatorMessage = ss.str();
						}
						message.type = MESSAGE_DAMAGE_OTHERS;
						message.text = spectatorMessage;
					}
					tmpPlayer->sendTextMessage(message);
				}

				damage.primary.value -= manaDamage;
				if (damage.primary.value < 0) {
					damage.secondary.value = std::max<int32_t>(0, damage.secondary.value + damage.primary.value);
					damage.primary.value = 0;
				}
			}
		}
" hitpoints" : " hitpoint"); std::string damageString = ss.str(); std::string spectatorMessage; for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); if (tmpPlayer->getPosition().z != targetPos.z) { continue; } if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " due to your attack."; message.type = MESSAGE_DAMAGE_DEALT; message.text = ss.str(); } else if (tmpPlayer == targetPlayer) { ss.str({}); ss << "You lose " << damageString; if (!attacker) { ss << '.'; } else if (targetPlayer == attackerPlayer) { ss << " due to your own attack."; } else { ss << " due to an attack by " << attacker->getNameDescription() << '.'; } message.type = MESSAGE_DAMAGE_RECEIVED; message.text = ss.str(); } else { message.type = MESSAGE_DAMAGE_OTHERS; if (spectatorMessage.empty()) { ss.str({}); ss << ucfirst(target->getNameDescription()) << " loses " << damageString; if (attacker) { ss << " due to "; if (attacker == target) { if (targetPlayer) { ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack"); } else { ss << "its own attack"; } } else { ss << "an attack by " << attacker->getNameDescription(); } } ss << '.'; spectatorMessage = ss.str(); } message.text = spectatorMessage; } tmpPlayer->sendTextMessage(message); } } if (realDamage >= targetHealth) { for (CreatureEvent* creatureEvent : target->getCreatureEvents(CREATURE_EVENT_PREPAREDEATH)) { if (!creatureEvent->executeOnPrepareDeath(target, attacker)) { return false; } } } target->drainHealth(attacker, realDamage); addCreatureHealth(spectators, target); } return true; } bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage) { Player* targetPlayer = target->getPlayer(); if (!targetPlayer) { return true; } int32_t manaChange = damage.primary.value + damage.secondary.value; if (manaChange > 0) { if (attacker) { const Player* attackerPlayer = attacker->getPlayer(); if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) { return false; } } if (damage.origin != ORIGIN_NONE) { const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE); if (!events.empty()) { for (CreatureEvent* creatureEvent : events) { creatureEvent->executeManaChange(target, attacker, damage); } damage.origin = ORIGIN_NONE; return combatChangeMana(attacker, target, damage); } } int32_t realManaChange = targetPlayer->getMana(); targetPlayer->changeMana(manaChange); realManaChange = targetPlayer->getMana() - realManaChange; if (realManaChange > 0 && !targetPlayer->isInGhostMode()) { TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana."); message.position = target->getPosition(); message.primary.value = realManaChange; message.primary.color = TEXTCOLOR_MAYABLUE; targetPlayer->sendTextMessage(message); } } else { const Position& targetPos = target->getPosition(); if (!target->isAttackable()) { if (!target->isInGhostMode()) { addMagicEffect(targetPos, CONST_ME_POFF); } return false; } Player* attackerPlayer; if (attacker) { attackerPlayer = attacker->getPlayer(); } else { attackerPlayer = nullptr; } if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) { return false; } int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange); BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, 
bool Game::combatChangeMana(Creature* attacker, Creature* target, CombatDamage& damage)
{
	Player* targetPlayer = target->getPlayer();
	if (!targetPlayer) {
		return true;
	}

	int32_t manaChange = damage.primary.value + damage.secondary.value;
	if (manaChange > 0) {
		if (attacker) {
			const Player* attackerPlayer = attacker->getPlayer();
			if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(target) == SKULL_NONE) {
				return false;
			}
		}

		if (damage.origin != ORIGIN_NONE) {
			const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
			if (!events.empty()) {
				for (CreatureEvent* creatureEvent : events) {
					creatureEvent->executeManaChange(target, attacker, damage);
				}
				damage.origin = ORIGIN_NONE;
				return combatChangeMana(attacker, target, damage);
			}
		}

		int32_t realManaChange = targetPlayer->getMana();
		targetPlayer->changeMana(manaChange);
		realManaChange = targetPlayer->getMana() - realManaChange;

		if (realManaChange > 0 && !targetPlayer->isInGhostMode()) {
			TextMessage message(MESSAGE_HEALED, "You gained " + std::to_string(realManaChange) + " mana.");
			message.position = target->getPosition();
			message.primary.value = realManaChange;
			message.primary.color = TEXTCOLOR_MAYABLUE;
			targetPlayer->sendTextMessage(message);
		}
	} else {
		const Position& targetPos = target->getPosition();
		if (!target->isAttackable()) {
			if (!target->isInGhostMode()) {
				addMagicEffect(targetPos, CONST_ME_POFF);
			}
			return false;
		}

		Player* attackerPlayer;
		if (attacker) {
			attackerPlayer = attacker->getPlayer();
		} else {
			attackerPlayer = nullptr;
		}

		if (attackerPlayer && attackerPlayer->getSkull() == SKULL_BLACK && attackerPlayer->getSkullClient(targetPlayer) == SKULL_NONE) {
			return false;
		}

		int32_t manaLoss = std::min<int32_t>(targetPlayer->getMana(), -manaChange);
		BlockType_t blockType = target->blockHit(attacker, COMBAT_MANADRAIN, manaLoss);
		if (blockType != BLOCK_NONE) {
			addMagicEffect(targetPos, CONST_ME_POFF);
			return false;
		}

		if (manaLoss <= 0) {
			return true;
		}

		if (damage.origin != ORIGIN_NONE) {
			const auto& events = target->getCreatureEvents(CREATURE_EVENT_MANACHANGE);
			if (!events.empty()) {
				for (CreatureEvent* creatureEvent : events) {
					creatureEvent->executeManaChange(target, attacker, damage);
				}
				damage.origin = ORIGIN_NONE;
				return combatChangeMana(attacker, target, damage);
			}
		}

		targetPlayer->drainMana(attacker, manaLoss);

		std::stringstream ss;

		std::string damageString = std::to_string(manaLoss);

		std::string spectatorMessage;

		TextMessage message;
		message.position = targetPos;
		message.primary.value = manaLoss;
		message.primary.color = TEXTCOLOR_BLUE;

		SpectatorHashSet spectators;
		map.getSpectators(spectators, targetPos, false, true);
		for (Creature* spectator : spectators) {
			Player* tmpPlayer = spectator->getPlayer();
			if (tmpPlayer == attackerPlayer && attackerPlayer != targetPlayer) {
				ss.str({});
				ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana due to your attack.";
				message.type = MESSAGE_DAMAGE_DEALT;
				message.text = ss.str();
			} else if (tmpPlayer == targetPlayer) {
				ss.str({});
				ss << "You lose " << damageString << " mana";
				if (!attacker) {
					ss << '.';
				} else if (targetPlayer == attackerPlayer) {
					ss << " due to your own attack.";
				} else {
					ss << " due to an attack by " << attacker->getNameDescription() << '.';
				}
				message.type = MESSAGE_DAMAGE_RECEIVED;
				message.text = ss.str();
			} else {
				if (spectatorMessage.empty()) {
					ss.str({});
					ss << ucfirst(target->getNameDescription()) << " loses " << damageString << " mana";
					if (attacker) {
						ss << " due to ";
						if (attacker == target) {
							ss << (targetPlayer->getSex() == PLAYERSEX_FEMALE ? "her own attack" : "his own attack");
						} else {
							ss << "an attack by " << attacker->getNameDescription();
						}
					}
					ss << '.';
					spectatorMessage = ss.str();
				}
				message.type = MESSAGE_DAMAGE_OTHERS;
				message.text = spectatorMessage;
			}
			tmpPlayer->sendTextMessage(message);
		}
	}

	return true;
}

void Game::addCreatureHealth(const Creature* target)
{
	SpectatorHashSet spectators;
	map.getSpectators(spectators, target->getPosition(), true, true);
	addCreatureHealth(spectators, target);
}

void Game::addCreatureHealth(const SpectatorHashSet& spectators, const Creature* target)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendCreatureHealth(target);
		}
	}
}

void Game::addMagicEffect(const Position& pos, uint8_t effect)
{
	SpectatorHashSet spectators;
	map.getSpectators(spectators, pos, true, true);
	addMagicEffect(spectators, pos, effect);
}

void Game::addMagicEffect(const SpectatorHashSet& spectators, const Position& pos, uint8_t effect)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendMagicEffect(pos, effect);
		}
	}
}

void Game::addDistanceEffect(const Position& fromPos, const Position& toPos, uint8_t effect)
{
	SpectatorHashSet spectators;
	map.getSpectators(spectators, fromPos, false, true);
	map.getSpectators(spectators, toPos, false, true);
	addDistanceEffect(spectators, fromPos, toPos, effect);
}

void Game::addDistanceEffect(const SpectatorHashSet& spectators, const Position& fromPos, const Position& toPos, uint8_t effect)
{
	for (Creature* spectator : spectators) {
		if (Player* tmpPlayer = spectator->getPlayer()) {
			tmpPlayer->sendDistanceShoot(fromPos, toPos, effect);
		}
	}
}
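// Item decay runs on a bucket wheel: decaying items are spread over
// EVENT_DECAY_BUCKETS lists and checkDecay visits one bucket per
// EVENT_DECAYINTERVAL, so an item is touched roughly once per full rotation
// rather than every tick. Items close to expiring are moved to a nearer
// bucket for finer granularity (see checkDecay below).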
void Game::startDecay(Item* item)
{
	if (!item || !item->canDecay()) {
		return;
	}

	ItemDecayState_t decayState = item->getDecaying();
	if (decayState == DECAYING_TRUE) {
		return;
	}

	if (item->getDuration() > 0) {
		item->incrementReferenceCounter();
		item->setDecaying(DECAYING_TRUE);
		toDecayItems.push_front(item);
	} else {
		internalDecayItem(item);
	}
}

void Game::internalDecayItem(Item* item)
{
	const ItemType& it = Item::items[item->getID()];
	if (it.decayTo != 0) {
		Item* newItem = transformItem(item, it.decayTo);
		startDecay(newItem);
	} else {
		ReturnValue ret = internalRemoveItem(item);
		if (ret != RETURNVALUE_NOERROR) {
			std::cout << "[Debug - Game::internalDecayItem] internalDecayItem failed, error code: " << static_cast<uint32_t>(ret) << ", item id: " << item->getID() << std::endl;
		}
	}
}

void Game::checkDecay()
{
	g_scheduler.addEvent(createSchedulerTask(EVENT_DECAYINTERVAL, std::bind(&Game::checkDecay, this)));

	size_t bucket = (lastBucket + 1) % EVENT_DECAY_BUCKETS;

	auto it = decayItems[bucket].begin(), end = decayItems[bucket].end();
	while (it != end) {
		Item* item = *it;
		if (!item->canDecay()) {
			item->setDecaying(DECAYING_FALSE);
			ReleaseItem(item);
			it = decayItems[bucket].erase(it);
			continue;
		}

		int32_t duration = item->getDuration();
		int32_t decreaseTime = std::min<int32_t>(EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS, duration);

		duration -= decreaseTime;
		item->decreaseDuration(decreaseTime);

		if (duration <= 0) {
			it = decayItems[bucket].erase(it);
			internalDecayItem(item);
			ReleaseItem(item);
		} else if (duration < EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) {
			it = decayItems[bucket].erase(it);
			size_t newBucket = (bucket + ((duration + EVENT_DECAYINTERVAL / 2) / 1000)) % EVENT_DECAY_BUCKETS;
			if (newBucket == bucket) {
				internalDecayItem(item);
				ReleaseItem(item);
			} else {
				decayItems[newBucket].push_back(item);
			}
		} else {
			++it;
		}
	}

	lastBucket = bucket;
	cleanup();
}
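// World light is a small state machine: lightHour advances by lightHourDelta
// per tick and wraps at 1440 (minutes per day). Near SUNRISE or SUNSET the
// state flips and the level ramps in 30 steps between LIGHT_LEVEL_NIGHT and
// LIGHT_LEVEL_DAY, broadcasting to all players whenever it changes.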
<< std::endl; } void Game::cleanup() { //free memory for (auto creature : ToReleaseCreatures) { creature->decrementReferenceCounter(); } ToReleaseCreatures.clear(); for (auto item : ToReleaseItems) { item->decrementReferenceCounter(); } ToReleaseItems.clear(); for (Item* item : toDecayItems) { const uint32_t dur = item->getDuration(); if (dur >= EVENT_DECAYINTERVAL * EVENT_DECAY_BUCKETS) { decayItems[lastBucket].push_back(item); } else { decayItems[(lastBucket + 1 + dur / 1000) % EVENT_DECAY_BUCKETS].push_back(item); } } toDecayItems.clear(); } void Game::ReleaseCreature(Creature* creature) { ToReleaseCreatures.push_back(creature); } void Game::ReleaseItem(Item* item) { ToReleaseItems.push_back(item); } void Game::broadcastMessage(const std::string& text, MessageClasses type) const { std::cout << "> Broadcasted message: \"" << text << "\"." << std::endl; for (const auto& it : players) { it.second->sendTextMessage(type, text); } } void Game::updateCreatureWalkthrough(const Creature* creature) { //send to clients SpectatorHashSet spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { Player* tmpPlayer = spectator->getPlayer(); tmpPlayer->sendCreatureWalkthrough(creature, tmpPlayer->canWalkthroughEx(creature)); } } void Game::updateCreatureSkull(const Creature* creature) { if (getWorldType() != WORLD_TYPE_PVP) { return; } SpectatorHashSet spectators; map.getSpectators(spectators, creature->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureSkull(creature); } } void Game::updatePlayerShield(Player* player) { SpectatorHashSet spectators; map.getSpectators(spectators, player->getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureShield(player); } } void Game::updatePlayerHelpers(const Player& player) { uint32_t creatureId = player.getID(); uint16_t helpers = player.getHelpers(); SpectatorHashSet spectators; map.getSpectators(spectators, player.getPosition(), true, true); for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureHelpers(creatureId, helpers); } } void Game::updateCreatureType(Creature* creature) { const Player* masterPlayer = nullptr; uint32_t creatureId = creature->getID(); CreatureType_t creatureType = creature->getType(); if (creatureType == CREATURETYPE_MONSTER) { const Creature* master = creature->getMaster(); if (master) { masterPlayer = master->getPlayer(); if (masterPlayer) { creatureType = CREATURETYPE_SUMMON_OTHERS; } } } //send to clients SpectatorHashSet spectators; map.getSpectators(spectators, creature->getPosition(), true, true); if (creatureType == CREATURETYPE_SUMMON_OTHERS) { for (Creature* spectator : spectators) { Player* player = spectator->getPlayer(); if (masterPlayer == player) { player->sendCreatureType(creatureId, CREATURETYPE_SUMMON_OWN); } else { player->sendCreatureType(creatureId, creatureType); } } } else { for (Creature* spectator : spectators) { spectator->getPlayer()->sendCreatureType(creatureId, creatureType); } } } void Game::updatePremium(Account& account) { bool save = false; time_t timeNow = time(nullptr); if (account.premiumDays != 0 && account.premiumDays != std::numeric_limits<uint16_t>::max()) { if (account.lastDay == 0) { account.lastDay = timeNow; save = true; } else { uint32_t days = (timeNow - account.lastDay) / 86400; if (days > 0) { if (days >= account.premiumDays) { account.premiumDays = 0; account.lastDay = 0; } else { account.premiumDays -= 
days; time_t remainder = (timeNow - account.lastDay) % 86400; account.lastDay = timeNow - remainder; } save = true; } } } else if (account.lastDay != 0) { account.lastDay = 0; save = true; } if (save && !IOLoginData::saveAccount(account)) { std::cout << "> ERROR: Failed to save account: " << account.name << "!" << std::endl; } } void Game::loadMotdNum() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_num'"); if (result) { motdNum = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_num', '0')"); } result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'motd_hash'"); if (result) { motdHash = result->getString("value"); if (motdHash != transformToSHA1(g_config.getString(ConfigManager::MOTD))) { ++motdNum; } } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('motd_hash', '')"); } } void Game::saveMotdNum() const { Database& db = Database::getInstance(); std::ostringstream query; query << "UPDATE `server_config` SET `value` = '" << motdNum << "' WHERE `config` = 'motd_num'"; db.executeQuery(query.str()); query.str(std::string()); query << "UPDATE `server_config` SET `value` = '" << transformToSHA1(g_config.getString(ConfigManager::MOTD)) << "' WHERE `config` = 'motd_hash'"; db.executeQuery(query.str()); } void Game::checkPlayersRecord() { const size_t playersOnline = getPlayersOnline(); if (playersOnline > playersRecord) { uint32_t previousRecord = playersRecord; playersRecord = playersOnline; for (auto& it : g_globalEvents->getEventMap(GLOBALEVENT_RECORD)) { it.second.executeRecord(playersRecord, previousRecord); } updatePlayersRecord(); } } void Game::updatePlayersRecord() const { Database& db = Database::getInstance(); std::ostringstream query; query << "UPDATE `server_config` SET `value` = '" << playersRecord << "' WHERE `config` = 'players_record'"; db.executeQuery(query.str()); } void Game::loadPlayersRecord() { Database& db = Database::getInstance(); DBResult_ptr result = db.storeQuery("SELECT `value` FROM `server_config` WHERE `config` = 'players_record'"); if (result) { playersRecord = result->getNumber<uint32_t>("value"); } else { db.executeQuery("INSERT INTO `server_config` (`config`, `value`) VALUES ('players_record', '0')"); } } uint64_t Game::getExperienceStage(uint32_t level) { if (!stagesEnabled) { return g_config.getNumber(ConfigManager::RATE_EXPERIENCE); } if (useLastStageLevel && level >= lastStageLevel) { return stages[lastStageLevel]; } return stages[level]; } bool Game::loadExperienceStages() { pugi::xml_document doc; pugi::xml_parse_result result = doc.load_file("data/XML/stages.xml"); if (!result) { printXMLError("Error - Game::loadExperienceStages", "data/XML/stages.xml", result); return false; } for (auto stageNode : doc.child("stages").children()) { if (strcasecmp(stageNode.name(), "config") == 0) { stagesEnabled = stageNode.attribute("enabled").as_bool(); } else { uint32_t minLevel, maxLevel, multiplier; pugi::xml_attribute minLevelAttribute = stageNode.attribute("minlevel"); if (minLevelAttribute) { minLevel = pugi::cast<uint32_t>(minLevelAttribute.value()); } else { minLevel = 1; } pugi::xml_attribute maxLevelAttribute = stageNode.attribute("maxlevel"); if (maxLevelAttribute) { maxLevel = pugi::cast<uint32_t>(maxLevelAttribute.value()); } else { maxLevel = 0; lastStageLevel = minLevel; useLastStageLevel = true; } pugi::xml_attribute 
multiplierAttribute = stageNode.attribute("multiplier"); if (multiplierAttribute) { multiplier = pugi::cast<uint32_t>(multiplierAttribute.value()); } else { multiplier = 1; } if (useLastStageLevel) { stages[lastStageLevel] = multiplier; } else { for (uint32_t i = minLevel; i <= maxLevel; ++i) { stages[i] = multiplier; } } } } return true; } void Game::playerInviteToParty(uint32_t playerId, uint32_t invitedId) { if (playerId == invitedId) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || invitedPlayer->isInviting(player)) { return; } if (invitedPlayer->getParty()) { std::ostringstream ss; ss << invitedPlayer->getName() << " is already in a party."; player->sendTextMessage(MESSAGE_INFO_DESCR, ss.str()); return; } Party* party = player->getParty(); if (!party) { party = new Party(player); } else if (party->getLeader() != player) { return; } party->invitePlayer(*invitedPlayer); } void Game::playerJoinParty(uint32_t playerId, uint32_t leaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Player* leader = getPlayerByID(leaderId); if (!leader || !leader->isInviting(player)) { return; } Party* party = leader->getParty(); if (!party || party->getLeader() != leader) { return; } if (player->getParty()) { player->sendTextMessage(MESSAGE_INFO_DESCR, "You are already in a party."); return; } party->joinParty(*player); } void Game::playerRevokePartyInvitation(uint32_t playerId, uint32_t invitedId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* invitedPlayer = getPlayerByID(invitedId); if (!invitedPlayer || !player->isInviting(invitedPlayer)) { return; } party->revokeInvitation(*invitedPlayer); } void Game::playerPassPartyLeadership(uint32_t playerId, uint32_t newLeaderId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || party->getLeader() != player) { return; } Player* newLeader = getPlayerByID(newLeaderId); if (!newLeader || !player->isPartner(newLeader)) { return; } party->passPartyLeadership(newLeader); } void Game::playerLeaveParty(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || player->hasCondition(CONDITION_INFIGHT)) { return; } party->leaveParty(player); } void Game::playerEnableSharedPartyExperience(uint32_t playerId, bool sharedExpActive) { Player* player = getPlayerByID(playerId); if (!player) { return; } Party* party = player->getParty(); if (!party || (player->hasCondition(CONDITION_INFIGHT) && player->getZone() != ZONE_PROTECTION)) { return; } party->setSharedExperience(player, sharedExpActive); } void Game::sendGuildMotd(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } Guild* guild = player->getGuild(); if (guild) { player->sendChannelMessage("Message of the Day", guild->getMotd(), TALKTYPE_CHANNEL_R1, CHANNEL_GUILD); } } void Game::kickPlayer(uint32_t playerId, bool displayEffect) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->kickPlayer(displayEffect); } void Game::playerReportRuleViolation(uint32_t playerId, const std::string& targetName, uint8_t reportType, uint8_t reportReason, const std::string& comment, const std::string& translation) { Player* player = getPlayerByID(playerId); if (!player) { return; } 
g_events->eventPlayerOnReportRuleViolation(player, targetName, reportType, reportReason, comment, translation); } void Game::playerReportBug(uint32_t playerId, const std::string& message, const Position& position, uint8_t category) { Player* player = getPlayerByID(playerId); if (!player) { return; } g_events->eventPlayerOnReportBug(player, message, position, category); } void Game::playerDebugAssert(uint32_t playerId, const std::string& assertLine, const std::string& date, const std::string& description, const std::string& comment) { Player* player = getPlayerByID(playerId); if (!player) { return; } // TODO: move debug assertions to database FILE* file = fopen("client_assertions.txt", "a"); if (file) { fprintf(file, "----- %s - %s (%s) -----\n", formatDate(time(nullptr)).c_str(), player->getName().c_str(), convertIPToString(player->getIP()).c_str()); fprintf(file, "%s\n%s\n%s\n%s\n", assertLine.c_str(), date.c_str(), description.c_str(), comment.c_str()); fclose(file); } } void Game::playerLeaveMarket(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } player->setInMarket(false); } void Game::playerBrowseMarket(uint32_t playerId, uint16_t spriteId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const ItemType& it = Item::items.getItemIdByClientId(spriteId); if (it.id == 0) { return; } if (it.wareId == 0) { return; } const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); player->sendMarketDetail(it.id); } void Game::playerBrowseMarketOwnOffers(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const MarketOfferList& buyOffers = IOMarket::getOwnOffers(MARKETACTION_BUY, player->getGUID()); const MarketOfferList& sellOffers = IOMarket::getOwnOffers(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnOffers(buyOffers, sellOffers); } void Game::playerBrowseMarketOwnHistory(uint32_t playerId) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } const HistoryMarketOfferList& buyOffers = IOMarket::getOwnHistory(MARKETACTION_BUY, player->getGUID()); const HistoryMarketOfferList& sellOffers = IOMarket::getOwnHistory(MARKETACTION_SELL, player->getGUID()); player->sendMarketBrowseOwnHistory(buyOffers, sellOffers); } void Game::playerCreateMarketOffer(uint32_t playerId, uint8_t type, uint16_t spriteId, uint16_t amount, uint32_t price, bool anonymous) { if (amount == 0 || amount > 64000) { return; } if (price == 0 || price > 999999999) { return; } if (type != MARKETACTION_BUY && type != MARKETACTION_SELL) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } if (g_config.getBoolean(ConfigManager::MARKET_PREMIUM) && !player->isPremium()) { player->sendMarketLeave(); return; } const ItemType& itt = Item::items.getItemIdByClientId(spriteId); if (itt.id == 0 || itt.wareId == 0) { return; } const ItemType& it = Item::items.getItemIdByClientId(itt.wareId); if (it.id == 0 || it.wareId == 0) { return; } if (!it.stackable && amount > 2000) { return; } const uint32_t maxOfferCount = g_config.getNumber(ConfigManager::MAX_MARKET_OFFERS_AT_A_TIME_PER_PLAYER); if (maxOfferCount != 0 && IOMarket::getPlayerOfferCount(player->getGUID()) >= 
maxOfferCount) { return; } uint64_t fee = (price / 100.) * amount; if (fee < 20) { fee = 20; } else if (fee > 1000) { fee = 1000; } if (type == MARKETACTION_SELL) { if (fee > player->bankBalance) { return; } DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if (!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } player->bankBalance -= fee; } else { uint64_t totalPrice = static_cast<uint64_t>(price) * amount; totalPrice += fee; if (totalPrice > player->bankBalance) { return; } player->bankBalance -= totalPrice; } IOMarket::createOffer(player->getGUID(), static_cast<MarketAction_t>(type), it.id, amount, price, anonymous); player->sendMarketEnter(player->getLastDepotId()); const MarketOfferList& buyOffers = IOMarket::getActiveOffers(MARKETACTION_BUY, it.id); const MarketOfferList& sellOffers = IOMarket::getActiveOffers(MARKETACTION_SELL, it.id); player->sendMarketBrowseItem(it.id, buyOffers, sellOffers); } void Game::playerCancelMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0 || offer.playerId != player->getGUID()) { return; } if (offer.type == MARKETACTION_BUY) { player->bankBalance += static_cast<uint64_t>(offer.price) * offer.amount; player->sendMarketEnter(player->getLastDepotId()); } else { const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } if (it.stackable) { uint16_t tmpAmount = offer.amount; while (tmpAmount > 0) { int32_t stackCount = std::min<int32_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < offer.amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } } IOMarket::moveOfferToHistory(offer.id, OFFERSTATE_CANCELLED); offer.amount = 0; offer.timestamp += g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); player->sendMarketCancelOffer(offer); player->sendMarketEnter(player->getLastDepotId()); } void Game::playerAcceptMarketOffer(uint32_t playerId, uint32_t timestamp, uint16_t counter, uint16_t amount) { if (amount == 0 || amount > 64000) { return; } Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->isInMarket()) { return; } MarketOfferEx offer = IOMarket::getOfferByCounter(timestamp, counter); if (offer.id == 0) { return; } if (amount > offer.amount) { return; } const ItemType& it = Item::items[offer.itemId]; if (it.id == 0) { return; } uint64_t totalPrice = static_cast<uint64_t>(offer.price) * amount; if (offer.type == MARKETACTION_BUY) { DepotChest* depotChest = player->getDepotChest(player->getLastDepotId(), false); if 
(!depotChest) { return; } std::forward_list<Item*> itemList = getMarketItemList(it.wareId, amount, depotChest, player->getInbox()); if (itemList.empty()) { return; } Player* buyerPlayer = getPlayerByGUID(offer.playerId); if (!buyerPlayer) { buyerPlayer = new Player(nullptr); if (!IOLoginData::loadPlayerById(buyerPlayer, offer.playerId)) { delete buyerPlayer; return; } } if (it.stackable) { uint16_t tmpAmount = amount; for (Item* item : itemList) { uint16_t removeCount = std::min<uint16_t>(tmpAmount, item->getItemCount()); tmpAmount -= removeCount; internalRemoveItem(item, removeCount); if (tmpAmount == 0) { break; } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } player->bankBalance += totalPrice; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(buyerPlayer->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } if (buyerPlayer->isOffline()) { IOLoginData::savePlayer(buyerPlayer); delete buyerPlayer; } else { buyerPlayer->onReceiveMail(); } } else { if (totalPrice > player->bankBalance) { return; } player->bankBalance -= totalPrice; if (it.stackable) { uint16_t tmpAmount = amount; while (tmpAmount > 0) { uint16_t stackCount = std::min<uint16_t>(100, tmpAmount); Item* item = Item::CreateItem(it.id, stackCount); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } tmpAmount -= stackCount; } } else { int32_t subType; if (it.charges != 0) { subType = it.charges; } else { subType = -1; } for (uint16_t i = 0; i < amount; ++i) { Item* item = Item::CreateItem(it.id, subType); if (internalAddItem(player->getInbox(), item, INDEX_WHEREEVER, FLAG_NOLIMIT) != RETURNVALUE_NOERROR) { delete item; break; } } } Player* sellerPlayer = getPlayerByGUID(offer.playerId); if (sellerPlayer) { sellerPlayer->bankBalance += totalPrice; } else { IOLoginData::increaseBankBalance(offer.playerId, totalPrice); } player->onReceiveMail(); } const int32_t marketOfferDuration = g_config.getNumber(ConfigManager::MARKET_OFFER_DURATION); IOMarket::appendHistory(player->getGUID(), (offer.type == MARKETACTION_BUY ? 
MARKETACTION_SELL : MARKETACTION_BUY), offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTEDEX); IOMarket::appendHistory(offer.playerId, offer.type, offer.itemId, amount, offer.price, offer.timestamp + marketOfferDuration, OFFERSTATE_ACCEPTED); offer.amount -= amount; if (offer.amount == 0) { IOMarket::deleteOffer(offer.id); } else { IOMarket::acceptOffer(offer.id, amount); } player->sendMarketEnter(player->getLastDepotId()); offer.timestamp += marketOfferDuration; player->sendMarketAcceptOffer(offer); } void Game::parsePlayerExtendedOpcode(uint32_t playerId, uint8_t opcode, const std::string& buffer) { Player* player = getPlayerByID(playerId); if (!player) { return; } for (CreatureEvent* creatureEvent : player->getCreatureEvents(CREATURE_EVENT_EXTENDED_OPCODE)) { creatureEvent->executeExtendedOpcode(player, opcode, buffer); } } std::forward_list<Item*> Game::getMarketItemList(uint16_t wareId, uint16_t sufficientCount, DepotChest* depotChest, Inbox* inbox) { std::forward_list<Item*> itemList; uint16_t count = 0; std::list<Container*> containers { depotChest, inbox }; do { Container* container = containers.front(); containers.pop_front(); for (Item* item : container->getItemList()) { Container* c = item->getContainer(); if (c && !c->empty()) { containers.push_back(c); continue; } const ItemType& itemType = Item::items[item->getID()]; if (itemType.wareId != wareId) { continue; } if (c && (!itemType.isContainer() || c->capacity() != itemType.maxItems)) { continue; } if (!item->hasMarketAttributes()) { continue; } itemList.push_front(item); count += Item::countByType(item, -1); if (count >= sufficientCount) { return itemList; } } } while (!containers.empty()); return std::forward_list<Item*>(); } void Game::forceAddCondition(uint32_t creatureId, Condition* condition) { Creature* creature = getCreatureByID(creatureId); if (!creature) { delete condition; return; } creature->addCondition(condition, true); } void Game::forceRemoveCondition(uint32_t creatureId, ConditionType_t type) { Creature* creature = getCreatureByID(creatureId); if (!creature) { return; } creature->removeCondition(type, true); } void Game::sendOfflineTrainingDialog(Player* player) { if (!player) { return; } if (!player->hasModalWindowOpen(offlineTrainingWindow.id)) { player->sendModalWindow(offlineTrainingWindow); } } void Game::playerAnswerModalWindow(uint32_t playerId, uint32_t modalWindowId, uint8_t button, uint8_t choice) { Player* player = getPlayerByID(playerId); if (!player) { return; } if (!player->hasModalWindowOpen(modalWindowId)) { return; } player->onModalWindowHandled(modalWindowId); // offline training, hardcoded if (modalWindowId == std::numeric_limits<uint32_t>::max()) { if (button == 1) { if (choice == SKILL_SWORD || choice == SKILL_AXE || choice == SKILL_CLUB || choice == SKILL_DISTANCE || choice == SKILL_MAGLEVEL) { BedItem* bedItem = player->getBedItem(); if (bedItem && bedItem->sleep(player)) { player->setOfflineTrainingSkill(choice); return; } } } else { player->sendTextMessage(MESSAGE_EVENT_ADVANCE, "Offline training aborted."); } player->setBedItem(nullptr); } else { for (auto creatureEvent : player->getCreatureEvents(CREATURE_EVENT_MODALWINDOW)) { creatureEvent->executeModalWindow(player, modalWindowId, button, choice); } } } void Game::addPlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames[lowercase_name] = player; wildcardTree.insert(lowercase_name); players[player->getID()] = player; } void 
Game::removePlayer(Player* player) { const std::string& lowercase_name = asLowerCaseString(player->getName()); mappedPlayerNames.erase(lowercase_name); wildcardTree.remove(lowercase_name); players.erase(player->getID()); } void Game::addNpc(Npc* npc) { npcs[npc->getID()] = npc; } void Game::removeNpc(Npc* npc) { npcs.erase(npc->getID()); } void Game::addMonster(Monster* monster) { monsters[monster->getID()] = monster; } void Game::removeMonster(Monster* monster) { monsters.erase(monster->getID()); } Guild* Game::getGuild(uint32_t id) const { auto it = guilds.find(id); if (it == guilds.end()) { return nullptr; } return it->second; } void Game::addGuild(Guild* guild) { guilds[guild->getId()] = guild; } void Game::removeGuild(uint32_t guildId) { guilds.erase(guildId); } void Game::decreaseBrowseFieldRef(const Position& pos) { Tile* tile = map.getTile(pos.x, pos.y, pos.z); if (!tile) { return; } auto it = browseFields.find(tile); if (it != browseFields.end()) { it->second->decrementReferenceCounter(); } } void Game::internalRemoveItems(std::vector<Item*> itemList, uint32_t amount, bool stackable) { if (stackable) { for (Item* item : itemList) { if (item->getItemCount() > amount) { internalRemoveItem(item, amount); break; } else { amount -= item->getItemCount(); internalRemoveItem(item); } } } else { for (Item* item : itemList) { internalRemoveItem(item); } } } BedItem* Game::getBedBySleeper(uint32_t guid) const { auto it = bedSleepersMap.find(guid); if (it == bedSleepersMap.end()) { return nullptr; } return it->second; } void Game::setBedSleeper(BedItem* bed, uint32_t guid) { bedSleepersMap[guid] = bed; } void Game::removeBedSleeper(uint32_t guid) { auto it = bedSleepersMap.find(guid); if (it != bedSleepersMap.end()) { bedSleepersMap.erase(it); } } Item* Game::getUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it == uniqueItems.end()) { return nullptr; } return it->second; } bool Game::addUniqueItem(uint16_t uniqueId, Item* item) { auto result = uniqueItems.emplace(uniqueId, item); if (!result.second) { std::cout << "Duplicate unique id: " << uniqueId << std::endl; } return result.second; } void Game::removeUniqueItem(uint16_t uniqueId) { auto it = uniqueItems.find(uniqueId); if (it != uniqueItems.end()) { uniqueItems.erase(it); } } bool Game::reload(ReloadTypes_t reloadType) { switch (reloadType) { case RELOAD_TYPE_ACTIONS: return g_actions->reload(); case RELOAD_TYPE_CHAT: return g_chat->load(); case RELOAD_TYPE_CONFIG: return g_config.reload(); case RELOAD_TYPE_CREATURESCRIPTS: return g_creatureEvents->reload(); case RELOAD_TYPE_EVENTS: return g_events->load(); case RELOAD_TYPE_GLOBALEVENTS: return g_globalEvents->reload(); case RELOAD_TYPE_ITEMS: return Item::items.reload(); case RELOAD_TYPE_MONSTERS: return g_monsters.reload(); case RELOAD_TYPE_MOUNTS: return mounts.reload(); case RELOAD_TYPE_MOVEMENTS: return g_moveEvents->reload(); case RELOAD_TYPE_NPCS: { Npcs::reload(); return true; } case RELOAD_TYPE_QUESTS: return quests.reload(); case RELOAD_TYPE_RAIDS: return raids.reload() && raids.startup(); case RELOAD_TYPE_SPELLS: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." 
<< std::endl; std::terminate(); } return true; } case RELOAD_TYPE_TALKACTIONS: return g_talkActions->reload(); case RELOAD_TYPE_WEAPONS: { bool results = g_weapons->reload(); g_weapons->loadDefaults(); return results; } default: { if (!g_spells->reload()) { std::cout << "[Error - Game::reload] Failed to reload spells." << std::endl; std::terminate(); } else if (!g_monsters.reload()) { std::cout << "[Error - Game::reload] Failed to reload monsters." << std::endl; std::terminate(); } g_actions->reload(); g_config.reload(); g_creatureEvents->reload(); g_monsters.reload(); g_moveEvents->reload(); Npcs::reload(); raids.reload() && raids.startup(); g_talkActions->reload(); Item::items.reload(); g_weapons->reload(); g_weapons->loadDefaults(); quests.reload(); mounts.reload(); g_globalEvents->reload(); g_events->load(); g_chat->load(); return true; } } }
1
16,371
What if `Game::getPlayerByNameWildcard` had not been called before? It would not find a player, would it?
otland-forgottenserver
cpp
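The review comment above describes a lookup that only succeeds when earlier code has populated shared state. The relevant forgottenserver internals are not shown in this record, so the sketch below is a generic, hypothetical Go illustration of that hazard (all names invented), not the actual `getPlayerByNameWildcard` implementation:

package main

import "fmt"

// byName caches players by name. Only lookupWildcard ever inserts into
// it, so any code path that reads the cache directly misses players
// that were never looked up through the wildcard path first.
var byName = map[string]string{}

func lookupWildcard(name string) string {
	// imagine an expensive wildcard-tree search here
	byName[name] = name // side effect: populate the cache
	return name
}

func lookupCached(name string) (string, bool) {
	p, ok := byName[name] // fails unless lookupWildcard ran first
	return p, ok
}

func main() {
	if _, ok := lookupCached("alice"); !ok {
		fmt.Println("miss: lookupWildcard was never called for alice")
	}
}

If the real lookup behaves like `lookupCached` here, the reviewer's suspicion holds: the player is only found after some earlier call has warmed the shared structure.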
@@ -129,7 +129,7 @@ public class ApplicationsSidebar extends Sidebar { this.testingCheck = new SidebarCheckBox(tr("Testing")); this.testingCheck.selectedProperty().bindBidirectional(filter.containTestingApplicationsProperty()); - this.requiresPatchCheck = new SidebarCheckBox(tr("Requires patch")); + this.requiresPatchCheck = new SidebarCheckBox(tr("Patch Required")); this.requiresPatchCheck.selectedProperty().bindBidirectional(filter.containRequiresPatchApplicationsProperty()); this.commercialCheck = new SidebarCheckBox(tr("Commercial"));
1
package org.phoenicis.javafx.views.mainwindow.apps; import javafx.beans.binding.Bindings; import javafx.collections.FXCollections; import javafx.collections.ObservableList; import javafx.geometry.Pos; import javafx.scene.control.CheckBox; import javafx.scene.control.ToggleButton; import org.phoenicis.javafx.settings.JavaFxSettingsManager; import org.phoenicis.javafx.views.common.DelayedFilterTextConsumer; import org.phoenicis.javafx.views.common.lists.PhoenicisFilteredList; import org.phoenicis.javafx.views.common.widgets.lists.CombinedListWidget; import org.phoenicis.javafx.views.mainwindow.ui.*; import org.phoenicis.repository.dto.ApplicationDTO; import org.phoenicis.repository.dto.CategoryDTO; import java.util.function.Consumer; import static org.phoenicis.configuration.localisation.Localisation.tr; /** * An instance of this class represents the sidebar of the apps tab view. * This sidebar contains three items: * <ul> * <li> * A searchbar, which enables the user to search for an application in the selected categories of his/her repositories. * </li> * <li> * A toggle group containing all categories contained in his/her repositories including an "All" category. * </li> * <li> * A filter group, containing filters to be used to remove testing, requires patch and * commercial applications from the shown applications * </li> * </ul> * * @author marc * @since 21.04.17 */ public class ApplicationsSidebar extends Sidebar { private final ApplicationFilter filter; private final JavaFxSettingsManager javaFxSettingsManager; // the search bar user for application filtering/searching private SearchBox searchBar; // container for the center content of this sidebar private SidebarScrollPane centerContent; private ObservableList<CategoryDTO> categories; private PhoenicisFilteredList<CategoryDTO> filteredCategories; // the toggleable categories private SidebarToggleGroup<CategoryDTO> categoryView; // the group containing the application filters (testing, noCdNeeded and commercial) private SidebarGroup filterGroup; private CheckBox testingCheck; private CheckBox requiresPatchCheck; private CheckBox commercialCheck; private CheckBox operatingSystemCheck; // widget to switch between the different list widgets in the center view private ListWidgetChooser<ApplicationDTO> listWidgetChooser; // consumers called after a category selection has been made private Runnable onAllCategorySelection; private Consumer<CategoryDTO> onCategorySelection; /** * Constructor * * @param combinedListWidget The list widget to be managed by the ListWidgetChooser in the sidebar * @param javaFxSettingsManager The settings manager for the JavaFX GUI */ public ApplicationsSidebar(CombinedListWidget<ApplicationDTO> combinedListWidget, ApplicationFilter filter, JavaFxSettingsManager javaFxSettingsManager) { super(); this.filter = filter; this.javaFxSettingsManager = javaFxSettingsManager; this.populateSearchBar(); this.populateCategories(); this.populateFilters(); this.populateListWidgetChooser(combinedListWidget); this.centerContent = new SidebarScrollPane(this.categoryView, new SidebarSpacer(), this.filterGroup); this.setTop(this.searchBar); this.setCenter(this.centerContent); this.setBottom(this.listWidgetChooser); } /** * This method selects the "All" application category */ public void selectAllCategories() { this.categoryView.selectAll(); } /** * This method binds the given category list <code>categories</code> to the categories toggle group. 
* * @param categories The to be bound category list */ public void bindCategories(ObservableList<CategoryDTO> categories) { Bindings.bindContent(this.categories, categories); } private void populateSearchBar() { this.searchBar = new SearchBox(new DelayedFilterTextConsumer(filter::setFilterText), () -> filter.setFilterText("")); } private void populateCategories() { this.categories = FXCollections.observableArrayList(); this.filteredCategories = new PhoenicisFilteredList<>(categories, filter::filter); this.filter.addOnFilterChanged(filteredCategories::trigger); this.categoryView = SidebarToggleGroup.create(tr("Categories"), this::createAllCategoriesToggleButton, this::createCategoryToggleButton); Bindings.bindContent(categoryView.getElements(), filteredCategories); } private void populateFilters() { this.testingCheck = new SidebarCheckBox(tr("Testing")); this.testingCheck.selectedProperty().bindBidirectional(filter.containTestingApplicationsProperty()); this.requiresPatchCheck = new SidebarCheckBox(tr("Requires patch")); this.requiresPatchCheck.selectedProperty().bindBidirectional(filter.containRequiresPatchApplicationsProperty()); this.commercialCheck = new SidebarCheckBox(tr("Commercial")); this.commercialCheck.selectedProperty().bindBidirectional(filter.containCommercialApplicationsProperty()); this.commercialCheck.setSelected(true); this.operatingSystemCheck = new SidebarCheckBox(tr("All Operating Systems")); this.operatingSystemCheck.selectedProperty().bindBidirectional(filter.containAllOSCompatibleApplications()); this.operatingSystemCheck.setSelected(false); this.filterGroup = new SidebarGroup("Filters", testingCheck, requiresPatchCheck, commercialCheck, operatingSystemCheck); } /** * This method populates the list widget choose * * @param combinedListWidget The managed CombinedListWidget */ private void populateListWidgetChooser(CombinedListWidget<ApplicationDTO> combinedListWidget) { this.listWidgetChooser = new ListWidgetChooser<>(combinedListWidget); this.listWidgetChooser.setAlignment(Pos.BOTTOM_LEFT); this.listWidgetChooser.choose(this.javaFxSettingsManager.getAppsListType()); this.listWidgetChooser.setOnChoose(type -> { this.javaFxSettingsManager.setAppsListType(type); this.javaFxSettingsManager.save(); }); } /** * This method is responsible for creating the "All" categories toggle button. * * @return The newly created "All" categories toggle button */ private ToggleButton createAllCategoriesToggleButton() { final SidebarToggleButton allCategoryButton = new SidebarToggleButton(tr("All")); allCategoryButton.setSelected(true); allCategoryButton.setId("allButton"); allCategoryButton.setOnAction(event -> onAllCategorySelection.run()); return allCategoryButton; } /** * This method is responsible for creating a toggle button for a given category. 
* * @param category The category for which a toggle button should be created * @return The newly created toggle button */ private ToggleButton createCategoryToggleButton(CategoryDTO category) { final SidebarToggleButton categoryButton = new SidebarToggleButton(category.getName()); categoryButton.setId(String.format("%sButton", category.getId().toLowerCase())); categoryButton.setOnAction(event -> onCategorySelection.accept(category)); return categoryButton; } /** * This method sets the consumer, that is called after a category has been selected * * @param onAllCategorySelection The new consumer to be used */ public void setOnAllCategorySelection(Runnable onAllCategorySelection) { this.onAllCategorySelection = onAllCategorySelection; } /** * This method sets the consumer, that is called after the "All" categories toggle button has been selected * * @param onCategorySelection The new consumer to be used */ public void setOnCategorySelection(Consumer<CategoryDTO> onCategorySelection) { this.onCategorySelection = onCategorySelection; } }
1
12,072
Why upper case "R"?
PhoenicisOrg-phoenicis
java
@@ -107,3 +107,19 @@ func relativeDockerfilePath(ws copilotDirGetter, dockerfilePath string) (string, } return relDfPath, nil } + +// dfBuildRequired returns if the container image should be built from local Dockerfile. +func dfBuildRequired(svc interface{}) (bool, error) { + type manifest interface { + BuildRequired() (bool, error) + } + mf, ok := svc.(manifest) + if !ok { + return false, fmt.Errorf("service does not have required methods BuildRequired()") + } + required, err := mf.BuildRequired() + if err != nil { + return false, fmt.Errorf("check if service requires building from local Dockerfile: %w", err) + } + return required, nil +}
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 // Package cli contains the copilot subcommands. package cli import ( "errors" "fmt" "os" "path/filepath" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/copilot-cli/internal/pkg/term/color" "github.com/aws/copilot-cli/internal/pkg/workspace" "github.com/spf13/cobra" ) // tryReadingAppName retrieves the application's name from the workspace if it exists and returns it. // If there is an error while retrieving the workspace summary, returns the empty string. func tryReadingAppName() string { ws, err := workspace.New() if err != nil { return "" } summary, err := ws.Summary() if err != nil { return "" } return summary.Application } type errReservedArg struct { val string } func (e *errReservedArg) Error() string { return fmt.Sprintf(`argument %s is a reserved keyword, please use a different value`, color.HighlightUserInput(e.val)) } // reservedArgs returns an error if the arguments contain any reserved keywords. func reservedArgs(cmd *cobra.Command, args []string) error { if len(args) != 1 { return nil } if args[0] == "local" { return &errReservedArg{val: "local"} } return nil } // runCmdE wraps one of the run error methods, PreRunE, RunE, of a cobra command so that if a user // types "help" in the arguments the usage string is printed instead of running the command. func runCmdE(f func(cmd *cobra.Command, args []string) error) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { if len(args) == 1 && args[0] == "help" { _ = cmd.Help() // Help always returns nil. os.Exit(0) } return f(cmd, args) } } // returns true if error type is stack set not exist. func isStackSetNotExistsErr(err error) bool { if err == nil { return false } aerr, ok := err.(awserr.Error) if !ok { return isStackSetNotExistsErr(errors.Unwrap(err)) } if aerr.Code() != "StackSetNotFoundException" { return isStackSetNotExistsErr(errors.Unwrap(err)) } return true } // relPath returns the path relative to the current working directory. func relPath(fullPath string) (string, error) { wkdir, err := os.Getwd() if err != nil { return "", fmt.Errorf("get working directory: %w", err) } path, err := filepath.Rel(wkdir, fullPath) if err != nil { return "", fmt.Errorf("get relative path of file: %w", err) } return path, nil } // relativeDockerfilePath returns the path from the workspace root to the Dockerfile. func relativeDockerfilePath(ws copilotDirGetter, dockerfilePath string) (string, error) { copilotDirPath, err := ws.CopilotDirPath() if err != nil { return "", fmt.Errorf("get copilot directory: %w", err) } wsRoot := filepath.Dir(copilotDirPath) absDfPath, err := filepath.Abs(dockerfilePath) if err != nil { return "", fmt.Errorf("get absolute path: %v", err) } relDfPath, err := filepath.Rel(wsRoot, absDfPath) if err != nil { return "", fmt.Errorf("find relative path from workspace root to Dockerfile: %v", err) } return relDfPath, nil }
1
15,287
Does this need to return an error or could it return `false, nil`?
aws-copilot-cli
go
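The review comment above asks whether `dfBuildRequired` really needs to fail when the service manifest lacks a `BuildRequired()` method. Below is a minimal sketch of the `false, nil` alternative, reusing the `manifest` interface from the patch; whether the project actually adopted this variant is not shown in this record, and `svcWithBuild` is a stand-in type added only for the demo:

package main

import "fmt"

// svcWithBuild is a hypothetical manifest type used only by main below.
type svcWithBuild struct{}

func (svcWithBuild) BuildRequired() (bool, error) { return true, nil }

// dfBuildRequired, reworked along the lines of the review comment:
// a service whose manifest lacks BuildRequired() is treated as not
// needing a local Dockerfile build, instead of being an error.
func dfBuildRequired(svc interface{}) (bool, error) {
	type manifest interface {
		BuildRequired() (bool, error)
	}
	mf, ok := svc.(manifest)
	if !ok {
		return false, nil // no BuildRequired(): assume nothing to build
	}
	required, err := mf.BuildRequired()
	if err != nil {
		return false, fmt.Errorf("check if service requires building from local Dockerfile: %w", err)
	}
	return required, nil
}

func main() {
	fmt.Println(dfBuildRequired(svcWithBuild{})) // true <nil>
	fmt.Println(dfBuildRequired(struct{}{}))     // false <nil>, no error now
}

The trade-off: returning `false, nil` silently skips the build for any type that forgets to implement `BuildRequired()`, whereas the error in the patch surfaces that programming mistake early.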
@@ -94,7 +94,7 @@ public abstract class Analyzer implements Closeable { * Create a new Analyzer, reusing the same set of components per-thread * across calls to {@link #tokenStream(String, Reader)}. */ - public Analyzer() { + protected Analyzer() { this(GLOBAL_REUSE_STRATEGY); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.analysis; import java.io.Closeable; import java.io.IOException; import java.io.Reader; import java.io.StringReader; import java.util.HashMap; import java.util.Map; import java.util.function.Consumer; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.AttributeFactory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CloseableThreadLocal; import org.apache.lucene.util.Version; /** * An Analyzer builds TokenStreams, which analyze text. It thus represents a * policy for extracting index terms from text. * <p> * In order to define what analysis is done, subclasses must define their * {@link TokenStreamComponents TokenStreamComponents} in {@link #createComponents(String)}. * The components are then reused in each call to {@link #tokenStream(String, Reader)}. * <p> * Simple example: * <pre class="prettyprint"> * Analyzer analyzer = new Analyzer() { * {@literal @Override} * protected TokenStreamComponents createComponents(String fieldName) { * Tokenizer source = new FooTokenizer(reader); * TokenStream filter = new FooFilter(source); * filter = new BarFilter(filter); * return new TokenStreamComponents(source, filter); * } * {@literal @Override} * protected TokenStream normalize(TokenStream in) { * // Assuming FooFilter is about normalization and BarFilter is about * // stemming, only FooFilter should be applied * return new FooFilter(in); * } * }; * </pre> * For more examples, see the {@link org.apache.lucene.analysis Analysis package documentation}. * <p> * For some concrete implementations bundled with Lucene, look in the analysis modules: * <ul> * <li><a href="{@docRoot}/../analysis/common/overview-summary.html">Common</a>: * Analyzers for indexing content in different languages and domains. * <li><a href="{@docRoot}/../analysis/icu/overview-summary.html">ICU</a>: * Exposes functionality from ICU to Apache Lucene. * <li><a href="{@docRoot}/../analysis/kuromoji/overview-summary.html">Kuromoji</a>: * Morphological analyzer for Japanese text. * <li><a href="{@docRoot}/../analysis/morfologik/overview-summary.html">Morfologik</a>: * Dictionary-driven lemmatization for the Polish language. * <li><a href="{@docRoot}/../analysis/phonetic/overview-summary.html">Phonetic</a>: * Analysis for indexing phonetic signatures (for sounds-alike search). * <li><a href="{@docRoot}/../analysis/smartcn/overview-summary.html">Smart Chinese</a>: * Analyzer for Simplified Chinese, which indexes words. 
* <li><a href="{@docRoot}/../analysis/stempel/overview-summary.html">Stempel</a>: * Algorithmic Stemmer for the Polish Language. * </ul> * * @since 3.1 */ public abstract class Analyzer implements Closeable { private final ReuseStrategy reuseStrategy; private Version version = Version.LATEST; // non final as it gets nulled if closed; pkg private for access by ReuseStrategy's final helper methods: CloseableThreadLocal<Object> storedValue = new CloseableThreadLocal<>(); /** * Create a new Analyzer, reusing the same set of components per-thread * across calls to {@link #tokenStream(String, Reader)}. */ public Analyzer() { this(GLOBAL_REUSE_STRATEGY); } /** * Expert: create a new Analyzer with a custom {@link ReuseStrategy}. * <p> * NOTE: if you just want to reuse on a per-field basis, it's easier to * use a subclass of {@link AnalyzerWrapper} such as * <a href="{@docRoot}/../analysis/common/org/apache/lucene/analysis/miscellaneous/PerFieldAnalyzerWrapper.html"> * PerFieldAnalyerWrapper</a> instead. */ public Analyzer(ReuseStrategy reuseStrategy) { this.reuseStrategy = reuseStrategy; } /** * Creates a new {@link TokenStreamComponents} instance for this analyzer. * * @param fieldName * the name of the fields content passed to the * {@link TokenStreamComponents} sink as a reader * @return the {@link TokenStreamComponents} for this analyzer. */ protected abstract TokenStreamComponents createComponents(String fieldName); /** * Wrap the given {@link TokenStream} in order to apply normalization filters. * The default implementation returns the {@link TokenStream} as-is. This is * used by {@link #normalize(String, String)}. */ protected TokenStream normalize(String fieldName, TokenStream in) { return in; } /** * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing * the contents of <code>reader</code>. * <p> * This method uses {@link #createComponents(String)} to obtain an * instance of {@link TokenStreamComponents}. It returns the sink of the * components and stores the components internally. Subsequent calls to this * method will reuse the previously stored components after resetting them * through {@link TokenStreamComponents#setReader(Reader)}. * <p> * <b>NOTE:</b> After calling this method, the consumer must follow the * workflow described in {@link TokenStream} to properly consume its contents. * See the {@link org.apache.lucene.analysis Analysis package documentation} for * some examples demonstrating this. * * <b>NOTE:</b> If your data is available as a {@code String}, use * {@link #tokenStream(String, String)} which reuses a {@code StringReader}-like * instance internally. * * @param fieldName the name of the field the created TokenStream is used for * @param reader the reader the streams source reads from * @return TokenStream for iterating the analyzed content of <code>reader</code> * @throws AlreadyClosedException if the Analyzer is closed. * @see #tokenStream(String, String) */ public final TokenStream tokenStream(final String fieldName, final Reader reader) { TokenStreamComponents components = reuseStrategy.getReusableComponents(this, fieldName); final Reader r = initReader(fieldName, reader); if (components == null) { components = createComponents(fieldName); reuseStrategy.setReusableComponents(this, fieldName, components); } components.setReader(r); return components.getTokenStream(); } /** * Returns a TokenStream suitable for <code>fieldName</code>, tokenizing * the contents of <code>text</code>. 
* <p> * This method uses {@link #createComponents(String)} to obtain an * instance of {@link TokenStreamComponents}. It returns the sink of the * components and stores the components internally. Subsequent calls to this * method will reuse the previously stored components after resetting them * through {@link TokenStreamComponents#setReader(Reader)}. * <p> * <b>NOTE:</b> After calling this method, the consumer must follow the * workflow described in {@link TokenStream} to properly consume its contents. * See the {@link org.apache.lucene.analysis Analysis package documentation} for * some examples demonstrating this. * * @param fieldName the name of the field the created TokenStream is used for * @param text the String the streams source reads from * @return TokenStream for iterating the analyzed content of <code>reader</code> * @throws AlreadyClosedException if the Analyzer is closed. * @see #tokenStream(String, Reader) */ public final TokenStream tokenStream(final String fieldName, final String text) { TokenStreamComponents components = reuseStrategy.getReusableComponents(this, fieldName); @SuppressWarnings("resource") final ReusableStringReader strReader = (components == null || components.reusableStringReader == null) ? new ReusableStringReader() : components.reusableStringReader; strReader.setValue(text); final Reader r = initReader(fieldName, strReader); if (components == null) { components = createComponents(fieldName); reuseStrategy.setReusableComponents(this, fieldName, components); } components.setReader(r); components.reusableStringReader = strReader; return components.getTokenStream(); } /** * Normalize a string down to the representation that it would have in the * index. * <p> * This is typically used by query parsers in order to generate a query on * a given term, without tokenizing or stemming, which are undesirable if * the string to analyze is a partial word (eg. in case of a wildcard or * fuzzy query). * <p> * This method uses {@link #initReaderForNormalization(String, Reader)} in * order to apply necessary character-level normalization and then * {@link #normalize(String, TokenStream)} in order to apply the normalizing * token filters. 
*/ public final BytesRef normalize(final String fieldName, final String text) { try { // apply char filters final String filteredText; try (Reader reader = new StringReader(text)) { Reader filterReader = initReaderForNormalization(fieldName, reader); char[] buffer = new char[64]; StringBuilder builder = new StringBuilder(); for (;;) { final int read = filterReader.read(buffer, 0, buffer.length); if (read == -1) { break; } builder.append(buffer, 0, read); } filteredText = builder.toString(); } catch (IOException e) { throw new IllegalStateException("Normalization threw an unexpected exception", e); } final AttributeFactory attributeFactory = attributeFactory(fieldName); try (TokenStream ts = normalize(fieldName, new StringTokenStream(attributeFactory, filteredText, text.length()))) { final TermToBytesRefAttribute termAtt = ts.addAttribute(TermToBytesRefAttribute.class); ts.reset(); if (ts.incrementToken() == false) { throw new IllegalStateException("The normalization token stream is " + "expected to produce exactly 1 token, but got 0 for analyzer " + this + " and input \"" + text + "\""); } final BytesRef term = BytesRef.deepCopyOf(termAtt.getBytesRef()); if (ts.incrementToken()) { throw new IllegalStateException("The normalization token stream is " + "expected to produce exactly 1 token, but got 2+ for analyzer " + this + " and input \"" + text + "\""); } ts.end(); return term; } } catch (IOException e) { throw new IllegalStateException("Normalization threw an unexpected exception", e); } } /** * Override this if you want to add a CharFilter chain. * <p> * The default implementation returns <code>reader</code> * unchanged. * * @param fieldName IndexableField name being indexed * @param reader original Reader * @return reader, optionally decorated with CharFilter(s) */ protected Reader initReader(String fieldName, Reader reader) { return reader; } /** Wrap the given {@link Reader} with {@link CharFilter}s that make sense * for normalization. This is typically a subset of the {@link CharFilter}s * that are applied in {@link #initReader(String, Reader)}. This is used by * {@link #normalize(String, String)}. */ protected Reader initReaderForNormalization(String fieldName, Reader reader) { return reader; } /** Return the {@link AttributeFactory} to be used for * {@link #tokenStream analysis} and * {@link #normalize(String, String) normalization} on the given * {@code FieldName}. The default implementation returns * {@link TokenStream#DEFAULT_TOKEN_ATTRIBUTE_FACTORY}. */ protected AttributeFactory attributeFactory(String fieldName) { return TokenStream.DEFAULT_TOKEN_ATTRIBUTE_FACTORY; } /** * Invoked before indexing a IndexableField instance if * terms have already been added to that field. This allows custom * analyzers to place an automatic position increment gap between * IndexbleField instances using the same field name. The default value * position increment gap is 0. With a 0 position increment gap and * the typical default token position increment of 1, all terms in a field, * including across IndexableField instances, are in successive positions, allowing * exact PhraseQuery matches, for instance, across IndexableField instance boundaries. * * @param fieldName IndexableField name being indexed. * @return position increment gap, added to the next token emitted from {@link #tokenStream(String,Reader)}. * This value must be {@code >= 0}. */ public int getPositionIncrementGap(String fieldName) { return 0; } /** * Just like {@link #getPositionIncrementGap}, except for * Token offsets instead. 
By default this returns 1. * This method is only called if the field * produced at least one token for indexing. * * @param fieldName the field just indexed * @return offset gap, added to the next token emitted from {@link #tokenStream(String,Reader)}. * This value must be {@code >= 0}. */ public int getOffsetGap(String fieldName) { return 1; } /** * Returns the used {@link ReuseStrategy}. */ public final ReuseStrategy getReuseStrategy() { return reuseStrategy; } /** * Set the version of Lucene this analyzer should mimic the behavior for for analysis. */ public void setVersion(Version v) { version = v; // TODO: make write once? } /** * Return the version of Lucene this analyzer will mimic the behavior of for analysis. */ public Version getVersion() { return version; } /** Frees persistent resources used by this Analyzer */ @Override public void close() { if (storedValue != null) { storedValue.close(); storedValue = null; } } /** * This class encapsulates the outer components of a token stream. It provides * access to the source (a {@link Reader} {@link Consumer} and the outer end (sink), an * instance of {@link TokenFilter} which also serves as the * {@link TokenStream} returned by * {@link Analyzer#tokenStream(String, Reader)}. */ public static final class TokenStreamComponents { /** * Original source of the tokens. */ protected final Consumer<Reader> source; /** * Sink tokenstream, such as the outer tokenfilter decorating * the chain. This can be the source if there are no filters. */ protected final TokenStream sink; /** Internal cache only used by {@link Analyzer#tokenStream(String, String)}. */ transient ReusableStringReader reusableStringReader; /** * Creates a new {@link TokenStreamComponents} instance. * * @param source * the source to set the reader on * @param result * the analyzer's resulting token stream */ public TokenStreamComponents(final Consumer<Reader> source, final TokenStream result) { this.source = source; this.sink = result; } /** * Creates a new {@link TokenStreamComponents} instance * @param tokenizer the analyzer's Tokenizer * @param result the analyzer's resulting token stream */ public TokenStreamComponents(final Tokenizer tokenizer, final TokenStream result) { this(tokenizer::setReader, result); } /** * Creates a new {@link TokenStreamComponents} from a Tokenizer */ public TokenStreamComponents(final Tokenizer tokenizer) { this(tokenizer::setReader, tokenizer); } /** * Resets the encapsulated components with the given reader. If the components * cannot be reset, an Exception should be thrown. * * @param reader * a reader to reset the source component */ private void setReader(final Reader reader) { source.accept(reader); } /** * Returns the sink {@link TokenStream} * * @return the sink {@link TokenStream} */ public TokenStream getTokenStream() { return sink; } /** * Returns the component's source */ public Consumer<Reader> getSource() { return source; } } /** * Strategy defining how TokenStreamComponents are reused per call to * {@link Analyzer#tokenStream(String, java.io.Reader)}. */ public static abstract class ReuseStrategy { /** Sole constructor. (For invocation by subclass constructors, typically implicit.) */ public ReuseStrategy() {} /** * Gets the reusable TokenStreamComponents for the field with the given name. * * @param analyzer Analyzer from which to get the reused components. Use * {@link #getStoredValue(Analyzer)} and {@link #setStoredValue(Analyzer, Object)} * to access the data on the Analyzer. 
* @param fieldName Name of the field whose reusable TokenStreamComponents * are to be retrieved * @return Reusable TokenStreamComponents for the field, or {@code null} * if there was no previous components for the field */ public abstract TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName); /** * Stores the given TokenStreamComponents as the reusable components for the * field with the give name. * * @param fieldName Name of the field whose TokenStreamComponents are being set * @param components TokenStreamComponents which are to be reused for the field */ public abstract void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components); /** * Returns the currently stored value. * * @return Currently stored value or {@code null} if no value is stored * @throws AlreadyClosedException if the Analyzer is closed. */ protected final Object getStoredValue(Analyzer analyzer) { if (analyzer.storedValue == null) { throw new AlreadyClosedException("this Analyzer is closed"); } return analyzer.storedValue.get(); } /** * Sets the stored value. * * @param storedValue Value to store * @throws AlreadyClosedException if the Analyzer is closed. */ protected final void setStoredValue(Analyzer analyzer, Object storedValue) { if (analyzer.storedValue == null) { throw new AlreadyClosedException("this Analyzer is closed"); } analyzer.storedValue.set(storedValue); } } /** * A predefined {@link ReuseStrategy} that reuses the same components for * every field. */ public static final ReuseStrategy GLOBAL_REUSE_STRATEGY = new ReuseStrategy() { @Override public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) { return (TokenStreamComponents) getStoredValue(analyzer); } @Override public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) { setStoredValue(analyzer, components); } }; /** * A predefined {@link ReuseStrategy} that reuses components per-field by * maintaining a Map of TokenStreamComponent per field name. */ public static final ReuseStrategy PER_FIELD_REUSE_STRATEGY = new ReuseStrategy() { @SuppressWarnings("unchecked") @Override public TokenStreamComponents getReusableComponents(Analyzer analyzer, String fieldName) { Map<String, TokenStreamComponents> componentsPerField = (Map<String, TokenStreamComponents>) getStoredValue(analyzer); return componentsPerField != null ? 
componentsPerField.get(fieldName) : null; } @SuppressWarnings("unchecked") @Override public void setReusableComponents(Analyzer analyzer, String fieldName, TokenStreamComponents components) { Map<String, TokenStreamComponents> componentsPerField = (Map<String, TokenStreamComponents>) getStoredValue(analyzer); if (componentsPerField == null) { componentsPerField = new HashMap<>(); setStoredValue(analyzer, componentsPerField); } componentsPerField.put(fieldName, components); } }; private static final class StringTokenStream extends TokenStream { private final String value; private final int length; private boolean used = true; private final CharTermAttribute termAttribute = addAttribute(CharTermAttribute.class); private final OffsetAttribute offsetAttribute = addAttribute(OffsetAttribute.class); StringTokenStream(AttributeFactory attributeFactory, String value, int length) { super(attributeFactory); this.value = value; this.length = length; } @Override public void reset() { used = false; } @Override public boolean incrementToken() { if (used) { return false; } clearAttributes(); termAttribute.append(value); offsetAttribute.setOffset(0, length); used = true; return true; } @Override public void end() throws IOException { super.end(); offsetAttribute.setOffset(length, length); } } }
1
36,178
Can you not change those scopes in public API classes? This applies here and in other places -- changing `source` from protected to package scope is not really an API-compatible change.
apache-lucene-solr
java
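A minimal two-file Java sketch of the compatibility concern raised above (hypothetical classes, not Lucene code): a subclass in another package may legally reference a protected member, so narrowing that member to package scope breaks its compilation.

// File: lib/Base.java -- library, version 1
package lib;

public class Base {
    protected int gap = 1; // part of the API surface visible to subclasses
}

// File: app/Sub.java -- user code in a different package
package app;

public class Sub extends lib.Base {
    public int doubled() {
        return gap * 2; // legal while `gap` is protected
    }
}

// If version 2 narrows `gap` to package scope, Sub.java no longer compiles
// (`gap` is no longer visible outside package lib), so the change is
// source-incompatible for any out-of-package subclass -- the reviewer's point.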
@@ -85,7 +85,7 @@ class TranslationsController extends BaseAdminController 'item_to_translate' => $item_to_translate, 'item_name' => $item_name, 'module_part' => $module_part, - 'view_missing_traductions_only' => $this->getRequest()->get('view_missing_traductions_only', 1), + 'view_missing_traductions_only' => $this->getRequest()->get('view_missing_traductions_only'), 'max_input_vars_warning' => false, );
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Thelia\Controller\Admin; use Symfony\Component\Finder\Finder; use Thelia\Core\Security\Resource\AdminResources; use Thelia\Core\Security\AccessManager; use Thelia\Core\Translation\Translator; use Thelia\Model\Module; use Thelia\Model\ModuleQuery; use Thelia\Core\Template\TemplateHelper; use Thelia\Core\Template\TemplateDefinition; use Thelia\Tools\URL; /** * Class LangController * @package Thelia\Controller\Admin * @author Manuel Raynaud <[email protected]> */ class TranslationsController extends BaseAdminController { /** * @param string $item_name the modume code * @return Module the module object * @throws \InvalidArgumentException if module was not found */ protected function getModule($item_name) { if (null !== $module = ModuleQuery::create()->findPk($item_name)) { return $module; } throw new \InvalidArgumentException( $this->getTranslator()->trans("No module found for code '%item'", ['%item' => $item_name]) ); } protected function getModuleTemplateNames(Module $module, $template_type) { $templates = TemplateHelper::getInstance()->getList( $template_type, $module->getAbsoluteTemplateBasePath() ); $names = []; foreach ($templates as $template) { $names[] = $template->getName(); } return $names; } protected function renderTemplate() { // Get related strings, if all input data are here $item_to_translate = $this->getRequest()->get('item_to_translate'); $item_name = $this->getRequest()->get('item_name', ''); if ($item_to_translate == 'mo' && ! empty($item_name)) { $module_part = $this->getRequest()->get('module_part', ''); } else { $module_part = false; } $all_strings = array(); $template = $directory = $i18n_directory = false; $walkMode = TemplateHelper::WALK_MODE_TEMPLATE; $templateArguments = array( 'item_to_translate' => $item_to_translate, 'item_name' => $item_name, 'module_part' => $module_part, 'view_missing_traductions_only' => $this->getRequest()->get('view_missing_traductions_only', 1), 'max_input_vars_warning' => false, ); // Find the i18n directory, and the directory to examine. if (! empty($item_name) || $item_to_translate == 'co') { switch ($item_to_translate) { // Module core case 'mo': $module = $this->getModule($item_name); if ($module_part == 'core') { $directory = $module->getAbsoluteBaseDir(); $domain = $module->getTranslationDomain(); $i18n_directory = $module->getAbsoluteI18nPath(); $walkMode = TemplateHelper::WALK_MODE_PHP; } elseif ($module_part == 'admin-includes') { $directory = $module->getAbsoluteAdminIncludesPath(); $domain = $module->getAdminIncludesTranslationDomain(); $i18n_directory = $module->getAbsoluteAdminIncludesI18nPath(); $walkMode = TemplateHelper::WALK_MODE_TEMPLATE; } elseif (! 
empty($module_part)) { // Front or back office template, form of $module_part is [bo|fo].subdir-name list($type, $subdir) = explode('.', $module_part); if ($type == 'bo') { $directory = $module->getAbsoluteBackOfficeTemplatePath($subdir); $domain = $module->getBackOfficeTemplateTranslationDomain($subdir); $i18n_directory = $module->getAbsoluteBackOfficeI18nTemplatePath($subdir); } elseif ($type == 'fo') { $directory = $module->getAbsoluteFrontOfficeTemplatePath($subdir); $domain = $module->getFrontOfficeTemplateTranslationDomain($subdir); $i18n_directory = $module->getAbsoluteFrontOfficeI18nTemplatePath($subdir); } else { throw new \InvalidArgumentException("Undefined module template type: '$type'."); } $walkMode = TemplateHelper::WALK_MODE_TEMPLATE; } // Modules translations files are in the cache, and are not always // updated. Force a reload of the files to get last changes. if (! empty($domain)) { $this->loadTranslation($i18n_directory, $domain); } // List front and back office templates defined by this module $templateArguments['back_office_templates'] = implode(',', $this->getModuleTemplateNames($module, TemplateDefinition::BACK_OFFICE)); $templateArguments['front_office_templates'] = implode(',', $this->getModuleTemplateNames($module, TemplateDefinition::FRONT_OFFICE)); // Check if we have admin-include files try { $finder = Finder::create() ->files() ->depth(0) ->in($module->getAbsoluteAdminIncludesPath()) ->name('/\.html$/i') ; $hasAdminIncludes = $finder->count() > 0; } catch (\InvalidArgumentException $ex) { $hasAdminIncludes = false; } $templateArguments['has_admin_includes'] = $hasAdminIncludes; break; // Thelia Core case 'co': $directory = THELIA_ROOT . 'core/lib/Thelia'; $domain = 'core'; $i18n_directory = THELIA_ROOT . 'core/lib/Thelia/Config/I18n'; $walkMode = TemplateHelper::WALK_MODE_PHP; break; // Front-office template case 'fo': $template = new TemplateDefinition($item_name, TemplateDefinition::FRONT_OFFICE); break; // Back-office template case 'bo': $template = new TemplateDefinition($item_name, TemplateDefinition::BACK_OFFICE); break; // PDF templates case 'pf': $template = new TemplateDefinition($item_name, TemplateDefinition::PDF); break; // Email templates case 'ma': $template = new TemplateDefinition($item_name, TemplateDefinition::EMAIL); break; } if ($template) { $directory = $template->getAbsolutePath(); $i18n_directory = $template->getAbsoluteI18nPath(); $domain = $template->getTranslationDomain(); // Load translations files is this template is not the current template // as it is not loaded in Thelia.php if (! TemplateHelper::getInstance()->isActive($template)) { $this->loadTranslation($i18n_directory, $domain); } } // Load strings to translate if ($directory && ! empty($domain)) { // Save the string set, if the form was submitted if ($i18n_directory) { $save_mode = $this->getRequest()->get('save_mode', false); if ($save_mode !== false) { $texts = $this->getRequest()->get('text', array()); if (! 
empty($texts)) { $file = sprintf("%s".DS."%s.php", $i18n_directory, $this->getCurrentEditionLocale()); $translations = $this->getRequest()->get('translation', array()); TemplateHelper::getInstance()->writeTranslation($file, $texts, $translations, true); if ($save_mode == 'stay') { return $this->generateRedirectFromRoute("admin.configuration.translations", $templateArguments); } else { return $this->generateRedirect(URL::getInstance()->adminViewUrl('configuration')); } } } } // Load strings $stringsCount = TemplateHelper::getInstance()->walkDir( $directory, $walkMode, $this->getTranslator(), $this->getCurrentEditionLocale(), $domain, $all_strings ); // Estimate number of fields, and compare to php ini max_input_vars $stringsCount = $stringsCount * 2 + 6; if ($stringsCount > ini_get('max_input_vars')) { $templateArguments['max_input_vars_warning'] = true; $templateArguments['required_max_input_vars'] = $stringsCount; $templateArguments['current_max_input_vars'] = ini_get('max_input_vars'); } else { $templateArguments['all_strings'] = $all_strings; } } } return $this->render('translations', $templateArguments); } public function defaultAction() { if (null !== $response = $this->checkAuth(AdminResources::TRANSLATIONS, array(), AccessManager::VIEW)) { return $response; } return $this->renderTemplate(); } public function updateAction() { if (null !== $response = $this->checkAuth(AdminResources::LANGUAGE, array(), AccessManager::UPDATE)) { return $response; } return $this->renderTemplate(); } private function loadTranslation($directory, $domain) { try { $finder = Finder::create() ->files() ->depth(0) ->in($directory); /** @var \DirectoryIterator $file */ foreach ($finder as $file) { list($locale, $format) = explode('.', $file->getBaseName(), 2); Translator::getInstance()->addResource($format, $file->getPathname(), $locale, $domain); } } catch (\InvalidArgumentException $ex) { // Ignore missing I18n directories } } }
1
10,745
This parameter is only used in POST requests, so please use `getRequest()->request->get('...');` instead. Thanks
thelia-thelia
php
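A short PHP sketch of the suggested call, assuming the Symfony HttpFoundation semantics Thelia builds on: `Request::get()` searches several parameter bags, while the `request` bag holds only POST fields.

// Inside a BaseAdminController action; getRequest() returns a Symfony Request.
$request = $this->getRequest();

// Ambiguous: looks the key up across attributes, query (GET) and request (POST).
$flag = $request->get('view_missing_traductions_only');

// Explicit, as the reviewer asks: read only from the POST body.
$flag = $request->request->get('view_missing_traductions_only');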
@@ -29,8 +29,8 @@ export default function(config: Config, auth: IAuth, storage: IStorageHandler) { // this might be too harsh, so ask if it causes trouble // $FlowFixMe app.param('package', validatePackage); + app.param('filename', validatePackage); // $FlowFixMe - app.param('filename', validateName); app.param('tag', validateName); app.param('version', validateName); app.param('revision', validateName);
1
/** * @prettier * @flow */ import type { IAuth, IStorageHandler } from '../../../types'; import type { Config } from '@verdaccio/types'; import express from 'express'; import bodyParser from 'body-parser'; import whoami from './api/whoami'; import ping from './api/ping'; import user from './api/user'; import distTags from './api/dist-tags'; import publish from './api/publish'; import search from './api/search'; import pkg from './api/package'; import stars from './api/stars'; import profile from './api/v1/profile'; const { match, validateName, validatePackage, encodeScopePackage, antiLoop } = require('../middleware'); export default function(config: Config, auth: IAuth, storage: IStorageHandler) { /* eslint new-cap:off */ const app = express.Router(); /* eslint new-cap:off */ // validate all of these params as a package name // this might be too harsh, so ask if it causes trouble // $FlowFixMe app.param('package', validatePackage); // $FlowFixMe app.param('filename', validateName); app.param('tag', validateName); app.param('version', validateName); app.param('revision', validateName); app.param('token', validateName); // these can't be safely put into express url for some reason // TODO: For some reason? what reason? app.param('_rev', match(/^-rev$/)); app.param('org_couchdb_user', match(/^org\.couchdb\.user:/)); app.param('anything', match(/.*/)); app.use(auth.apiJWTmiddleware()); app.use(bodyParser.json({ strict: false, limit: config.max_body_size || '10mb' })); app.use(antiLoop(config)); // encode / in a scoped package name to be matched as a single parameter in routes app.use(encodeScopePackage); // for "npm whoami" whoami(app); pkg(app, auth, storage, config); profile(app, auth); search(app, auth, storage); user(app, auth, config); distTags(app, auth, storage); publish(app, auth, storage, config); ping(app); stars(app, storage); return app; }
1
20,275
Problem number 1: Scoped packages would have a `/` character here. Changing this to `validatePackage` resolves the 403.
verdaccio-verdaccio
js
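An illustrative JavaScript sketch of the 403 above (hypothetical validators, not verdaccio's actual middleware): a scoped value carries a `/`, so a single-segment name check rejects it while a package-aware check accepts it.

// A scoped package value as it might arrive in the `filename` route parameter.
const value = '@my-scope/pkg-1.0.0.tgz';

// Name-only validation: any '/' fails, which is what produced the 403.
const validNameOnly = !value.includes('/');

// Package-aware validation: permit exactly one '@scope/' prefix segment.
const parts = value.split('/');
const validPackageLike =
  parts.length === 1 ||
  (parts.length === 2 && parts[0].startsWith('@') && parts[1].length > 0);

console.log(validNameOnly);    // false -> rejected under validateName
console.log(validPackageLike); // true  -> passes under validatePackage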
@@ -639,7 +639,11 @@ class LabelledData(param.Parameterized): for k, v in self.items(): new_val = v.map(map_fn, specs, clone) if new_val is not None: - deep_mapped[k] = new_val + # Ensure key validation doesn't cause errors + try: + deep_mapped[k] = new_val + except KeyError: + pass if applies: deep_mapped = map_fn(deep_mapped) return deep_mapped else:
1
""" Provides Dimension objects for tracking the properties of a value, axis or map dimension. Also supplies the Dimensioned abstract baseclass for classes that accept Dimension values. """ from __future__ import unicode_literals import re import datetime as dt from operator import itemgetter import numpy as np import param from ..core.util import (basestring, sanitize_identifier, group_sanitizer, label_sanitizer, max_range, find_range, dimension_sanitizer, OrderedDict, bytes_to_unicode, unicode, dt64_to_dt, unique_array, builtins, config) from .options import Store, StoreOptions from .pprint import PrettyPrinter # Alias parameter support for pickle loading ALIASES = {'key_dimensions': 'kdims', 'value_dimensions': 'vdims', 'constant_dimensions': 'cdims'} title_format = "{name}: {val}{unit}" def param_aliases(d): """ Called from __setstate__ in LabelledData in order to load old pickles with outdated parameter names. Warning: We want to keep pickle hacking to a minimum! """ for old, new in ALIASES.items(): old_param = '_%s_param_value' % old new_param = '_%s_param_value' % new if old_param in d: d[new_param] = d.pop(old_param) return d class redim(object): """ Utility that supports re-dimensioning any HoloViews object via the redim method. """ def __init__(self, parent, mode=None): self.parent = parent # Can be 'dataset', 'dynamic' or None self.mode = mode def __str__(self): return "<holoviews.core.dimension.redim method>" @classmethod def replace_dimensions(cls, dimensions, overrides): """ Replaces dimensions in a list with a dictionary of overrides. Overrides should be indexed by the dimension name with values that is either a Dimension object, a string name or a dictionary specifying the dimension parameters to override. """ replaced = [] for d in dimensions: if d.name in overrides: override = overrides[d.name] elif d.label in overrides: override = overrides[d.label] else: override = None if override is None: replaced.append(d) elif isinstance(override, (basestring, tuple)): replaced.append(d(override)) elif isinstance(override, Dimension): replaced.append(override) elif isinstance(override, dict): replaced.append(d.clone(override.get('name',None), **{k:v for k,v in override.items() if k != 'name'})) else: raise ValueError('Dimension can only be overridden ' 'with another dimension or a dictionary ' 'of attributes') return replaced def _filter_cache(self, dmap, kdims): """ Returns a filtered version of the DynamicMap cache leaving only keys consistently with the newly specified values """ filtered = [] for key, value in dmap.data.items(): if not any(kd.values and v not in kd.values for kd, v in zip(kdims, key)): filtered.append((key, value)) return filtered def __call__(self, specs=None, **dimensions): """ Replace dimensions on the dataset and allows renaming dimensions in the dataset. Dimension mapping should map between the old dimension name and a dictionary of the new attributes, a completely new dimension or a new string name. 
""" parent = self.parent redimmed = parent if parent._deep_indexable and self.mode != 'dataset': deep_mapped = [(k, v.redim(specs, **dimensions)) for k, v in parent.items()] redimmed = parent.clone(deep_mapped) if specs is not None: if not isinstance(specs, list): specs = [specs] matches = any(parent.matches(spec) for spec in specs) if self.mode != 'dynamic' and not matches: return redimmed kdims = self.replace_dimensions(parent.kdims, dimensions) vdims = self.replace_dimensions(parent.vdims, dimensions) zipped_dims = zip(parent.kdims+parent.vdims, kdims+vdims) renames = {pk.name: nk for pk, nk in zipped_dims if pk != nk} if self.mode == 'dataset': data = parent.data if renames: data = parent.interface.redim(parent, renames) return parent.clone(data, kdims=kdims, vdims=vdims) if self.mode != 'dynamic': return redimmed.clone(kdims=kdims, vdims=vdims) redimmed = redimmed.clone(kdims=kdims, vdims=vdims, data= self._filter_cache(redimmed, kdims)) from ..util import Dynamic def dynamic_redim(obj, **dynkwargs): return obj.redim(specs, **dimensions) return Dynamic(redimmed, shared_data=True, streams=parent.streams, operation=dynamic_redim) def _redim(self, name, specs, **dims): dimensions = {k:{name:v} for k,v in dims.items()} return self(specs, **dimensions) def cyclic(self, specs=None, **values): return self._redim('cyclic', specs, **values) def value_format(self, specs=None, **values): return self._redim('value_format', specs, **values) def range(self, specs=None, **values): return self._redim('range', specs, **values) def label(self, specs=None, **values): for k, v in values.items(): dim = self.parent.get_dimension(k) if dim and dim.name != dim.label and dim.label != v: raise ValueError('Cannot override an existing Dimension label') return self._redim('label', specs, **values) def soft_range(self, specs=None, **values): return self._redim('soft_range', specs, **values) def type(self, specs=None, **values): return self._redim('type', specs, **values) def step(self, specs=None, **values): return self._redim('step', specs, **values) def unit(self, specs=None, **values): return self._redim('unit', specs, **values) def values(self, specs=None, **ranges): return self._redim('values', specs, **ranges) class Dimension(param.Parameterized): """ Dimension objects are used to specify some important general features that may be associated with a collection of values. For instance, a Dimension may specify that a set of numeric values actually correspond to 'Height' (dimension name), in units of meters, with a descriptive label 'Height of adult males'. All dimensions object have a name that identifies them and a label containing a suitable description. If the label is not explicitly specified it matches the name. These two parameters define the core identity of the dimension object and must match if two dimension objects are to be considered equivalent. All other parameters are considered optional metadata and are not used when testing for equality. Unlike all the other parameters, these core parameters can be used to construct a Dimension object from a tuple. This format is sufficient to define an identical Dimension: Dimension('a', label='Dimension A') == Dimension(('a', 'Dimension A')) Everything else about a dimension is considered to reflect non-semantic preferences. 
Examples include the default value (which may be used in a visualization to set an initial slider position), how the value is to rendered as text (which may be used to specify the printed floating point precision) or a suitable range of values to consider for a particular analysis. Units ----- Full unit support with automated conversions are on the HoloViews roadmap. Once rich unit objects are supported, the unit (or more specifically the type of unit) will be part of the core dimension specification used to establish equality. Until this feature is implemented, there are two auxiliary parameters that hold some partial information about the unit: the name of the unit and whether or not it is cyclic. The name of the unit is used as part of the pretty-printed representation and knowing whether it is cyclic is important for certain operations. """ name = param.String(doc=""" Short name associated with the Dimension, such as 'height' or 'weight'. Valid Python identifiers make good names, because they can be used conveniently as a keyword in many contexts.""") label = param.String(default=None, doc=""" Unrestricted label used to describe the dimension. A label should succinctly describe the dimension and may contain any characters, including Unicode and LaTeX expression.""") cyclic = param.Boolean(default=False, doc=""" Whether the range of this feature is cyclic such that the maximum allowed value (defined by the range parameter) is continuous with the minimum allowed value.""") value_format = param.Callable(default=None, doc=""" Formatting function applied to each value before display.""") range = param.Tuple(default=(None, None), doc=""" Specifies the minimum and maximum allowed values for a Dimension. None is used to represent an unlimited bound.""") soft_range = param.Tuple(default=(None, None), doc=""" Specifies a minimum and maximum reference value, which may be overridden by the data.""") type = param.Parameter(default=None, doc=""" Optional type associated with the Dimension values. The type may be an inbuilt constructor (such as int, str, float) or a custom class object.""") step = param.Number(default=None, doc=""" Optional floating point step specifying how frequently the underlying space should be sampled. May be used to define a discrete sampling of over the range.""") unit = param.String(default=None, allow_None=True, doc=""" Optional unit string associated with the Dimension. For instance, the string 'm' may be used represent units of meters and 's' to represent units of seconds.""") values = param.List(default=[], doc=""" Optional specification of the allowed value set for the dimension that may also be used to retain a categorical ordering.""") # Defines default formatting by type type_formatters = {} unit_format = ' ({unit})' presets = {} # A dictionary-like mapping name, (name,) or # (name, unit) to a preset Dimension object def __init__(self, spec, **params): """ Initializes the Dimension object with the given name. 
""" if 'name' in params: raise KeyError('Dimension name must only be passed as the positional argument') if isinstance(spec, Dimension): existing_params = dict(spec.get_param_values()) elif (spec, params.get('unit', None)) in self.presets.keys(): preset = self.presets[(str(spec), str(params['unit']))] existing_params = dict(preset.get_param_values()) elif spec in self.presets: existing_params = dict(self.presets[spec].get_param_values()) elif (spec,) in self.presets: existing_params = dict(self.presets[(spec,)].get_param_values()) else: existing_params = {} all_params = dict(existing_params, **params) if isinstance(spec, tuple): name, label = spec all_params['name'] = name all_params['label'] = label if 'label' in params and (label != params['label']): if params['label'] != label: self.warning('Using label as supplied by keyword ({!r}), ignoring ' 'tuple value {!r}'.format(params['label'], label)) all_params['label'] = params['label'] elif isinstance(spec, basestring): all_params['name'] = spec all_params['label'] = params.get('label', spec) if all_params['name'] == '': raise ValueError('Dimension name cannot be the empty string') if all_params['label'] in ['', None]: raise ValueError('Dimension label cannot be None or the empty string') values = params.get('values', []) if isinstance(values, basestring) and values == 'initial': self.warning("The 'initial' string for dimension values is no longer supported.") values = [] all_params['values'] = list(unique_array(values)) super(Dimension, self).__init__(**all_params) @property def spec(self): "Returns the corresponding tuple specification" return (self.name, self.label) def __call__(self, spec=None, **overrides): "Aliased to clone method. To be deprecated in 2.0" return self.clone(spec=spec, **overrides) def clone(self, spec=None, **overrides): """ Derive a new Dimension that inherits existing parameters except for the supplied, explicit overrides """ settings = dict(self.get_param_values(onlychanged=True), **overrides) if spec is None: spec = (self.name, overrides.get('label', self.label)) if 'label' in overrides and isinstance(spec, basestring) : spec = (spec, overrides['label']) elif 'label' in overrides and isinstance(spec, tuple) : if overrides['label'] != spec[1]: self.warning('Using label as supplied by keyword ({!r}), ignoring ' 'tuple value {!r}'.format(overrides['label'], spec[1])) spec = (spec[0], overrides['label']) return self.__class__(spec, **{k:v for k,v in settings.items() if k not in ['name', 'label']}) def __hash__(self): """ The hash allows Dimension objects to be used as dictionary keys in Python 3. """ return hash(self.spec) def __setstate__(self, d): """ Compatibility for pickles before alias attribute was introduced. """ super(Dimension, self).__setstate__(d) self.label = self.name def __eq__(self, other): "Implements equals operator including sanitized comparison." if isinstance(other, Dimension): return self.spec == other.spec # For comparison to strings. Name may be sanitized. return other in [self.name, self.label, dimension_sanitizer(self.name)] def __ne__(self, other): "Implements not equal operator including sanitized comparison." 
return not self.__eq__(other) def __lt__(self, other): "Dimensions are sorted alphanumerically by name" return self.name < other.name if isinstance(other, Dimension) else self.name < other def __str__(self): return self.name def __repr__(self): return self.pprint() @property def pprint_label(self): "The pretty-printed label string for the Dimension" unit = ('' if self.unit is None else type(self.unit)(self.unit_format).format(unit=self.unit)) return bytes_to_unicode(self.label) + bytes_to_unicode(unit) def pprint(self): changed = dict(self.get_param_values(onlychanged=True)) if len(set([changed.get(k, k) for k in ['name','label']])) == 1: return 'Dimension({spec})'.format(spec=repr(self.name)) ordering = sorted( sorted(changed.keys()), key=lambda k: (- float('inf') if self.params(k).precedence is None else self.params(k).precedence)) kws = ", ".join('%s=%r' % (k, changed[k]) for k in ordering if k != 'name') return 'Dimension({spec}, {kws})'.format(spec=repr(self.name), kws=kws) def pprint_value(self, value): """ Applies the defined formatting to the value. """ own_type = type(value) if self.type is None else self.type formatter = (self.value_format if self.value_format else self.type_formatters.get(own_type)) if formatter: if callable(formatter): return formatter(value) elif isinstance(formatter, basestring): if isinstance(value, dt.datetime): return value.strftime(formatter) elif isinstance(value, np.datetime64): return dt64_to_dt(value).strftime(formatter) elif re.findall(r"\{(\w+)\}", formatter): return formatter.format(value) else: return formatter % value return unicode(bytes_to_unicode(value)) def pprint_value_string(self, value): """ Pretty prints the dimension name and value using the global title_format variable, including the unit string (if set). Numeric types are printed to the stated rounding level. """ unit = '' if self.unit is None else ' ' + bytes_to_unicode(self.unit) value = self.pprint_value(value) return title_format.format(name=bytes_to_unicode(self.label), val=value, unit=unit) class LabelledData(param.Parameterized): """ LabelledData is a mix-in class designed to introduce the group and label parameters (and corresponding methods) to any class containing data. This class assumes that the core data contents will be held in the attribute called 'data'. Used together, group and label are designed to allow a simple and flexible means of addressing data. For instance, if you are collecting the heights of people in different demographics, you could specify the values of your objects as 'Height' and then use the label to specify the (sub)population. In this scheme, one object may have the parameters set to [group='Height', label='Children'] and another may use [group='Height', label='Adults']. Note: Another level of specification is implicit in the type (i.e class) of the LabelledData object. A full specification of a LabelledData object is therefore given by the tuple (<type>, <group>, label>). This additional level of specification is used in the traverse method. Any strings can be used for the group and label, but it can be convenient to use a capitalized string of alphanumeric characters, in which case the keys used for matching in the matches and traverse method will correspond exactly to {type}.{group}.{label}. Otherwise the strings provided will be sanitized to be valid capitalized Python identifiers, which works fine but can sometimes be confusing. 
""" group = param.String(default='LabelledData', constant=True, doc=""" A string describing the type of data contained by the object. By default this will typically mirror the class name.""") label = param.String(default='', constant=True, doc=""" Optional label describing the data, typically reflecting where or how it was measured. The label should allow a specific measurement or dataset to be referenced for a given group.""") _deep_indexable = False def __init__(self, data, id=None, plot_id=None, **params): """ All LabelledData subclasses must supply data to the constructor, which will be held on the .data attribute. This class also has an id instance attribute, which may be set to associate some custom options with the object. """ self.data = data self.id = id self._plot_id = plot_id or builtins.id(self) if isinstance(params.get('label',None), tuple): (alias, long_name) = params['label'] label_sanitizer.add_aliases(**{alias:long_name}) params['label'] = long_name if isinstance(params.get('group',None), tuple): (alias, long_name) = params['group'] group_sanitizer.add_aliases(**{alias:long_name}) params['group'] = long_name super(LabelledData, self).__init__(**params) if not group_sanitizer.allowable(self.group): raise ValueError("Supplied group %r contains invalid characters." % self.group) elif not label_sanitizer.allowable(self.label): raise ValueError("Supplied label %r contains invalid characters." % self.label) def clone(self, data=None, shared_data=True, new_type=None, *args, **overrides): """ Returns a clone of the object with matching parameter values containing the specified args and kwargs. If shared_data is set to True and no data explicitly supplied, the clone will share data with the original. May also supply a new_type, which will inherit all shared parameters. """ params = dict(self.get_param_values()) if new_type is None: clone_type = self.__class__ else: clone_type = new_type new_params = new_type.params() params = {k: v for k, v in params.items() if k in new_params} if params.get('group') == self.params()['group'].default: params.pop('group') settings = dict(params, **overrides) if 'id' not in settings: settings['id'] = self.id if data is None and shared_data: data = self.data settings['plot_id'] = self._plot_id # Apply name mangling for __ attribute pos_args = getattr(self, '_' + type(self).__name__ + '__pos_params', []) return clone_type(data, *args, **{k:v for k,v in settings.items() if k not in pos_args}) def relabel(self, label=None, group=None, depth=0): """ Assign a new label and/or group to an existing LabelledData object, creating a clone of the object with the new settings. """ new_data = self.data if (depth > 0) and getattr(self, '_deep_indexable', False): new_data = [] for k, v in self.data.items(): relabelled = v.relabel(group=group, label=label, depth=depth-1) new_data.append((k, relabelled)) keywords = [('label', label), ('group', group)] kwargs = {k: v for k, v in keywords if v is not None} return self.clone(new_data, **kwargs) def matches(self, spec): """ A specification may be a class, a tuple or a string. Equivalent to isinstance if a class is supplied, otherwise matching occurs on type, group and label. These may be supplied as a tuple of strings or as a single string of the form "{type}.{group}.{label}". Matching may be done on {type} alone, {type}.{group}, or {type}.{group}.{label}. 
The strings for the type, group, and label will each be sanitized before the match, and so the sanitized versions of those values will need to be provided if the match is to succeed. """ if callable(spec) and not isinstance(spec, type): return spec(self) elif isinstance(spec, type): return isinstance(self, spec) specification = (self.__class__.__name__, self.group, self.label) split_spec = tuple(spec.split('.')) if not isinstance(spec, tuple) else spec split_spec, nocompare = zip(*((None, True) if s == '*' or s is None else (s, False) for s in split_spec)) if all(nocompare): return True match_fn = itemgetter(*(idx for idx, nc in enumerate(nocompare) if not nc)) self_spec = match_fn(split_spec) unescaped_match = match_fn(specification[:len(split_spec)]) == self_spec if unescaped_match: return True sanitizers = [sanitize_identifier, group_sanitizer, label_sanitizer] identifier_specification = tuple(fn(ident, escape=False) for ident, fn in zip(specification, sanitizers)) identifier_match = match_fn(identifier_specification[:len(split_spec)]) == self_spec return identifier_match def traverse(self, fn, specs=None, full_breadth=True): """ Traverses any nested LabelledData object (i.e LabelledData objects containing LabelledData objects), applying the supplied function to each constituent element if the supplied specifications. The output of these function calls are collected and returned in the accumulator list. If specs is None, all constituent elements are processed. Otherwise, specs must be a list of type.group.label specs, types, and functions. """ accumulator = [] matches = specs is None if not matches: for spec in specs: matches = self.matches(spec) if matches: break if matches: accumulator.append(fn(self)) # Assumes composite objects are iterables if self._deep_indexable: for el in self: if el is None: continue accumulator += el.traverse(fn, specs, full_breadth) if not full_breadth: break return accumulator def map(self, map_fn, specs=None, clone=True): """ Recursively replaces elements using a map function when the specification applies. """ if specs and not isinstance(specs, list): specs = [specs] applies = specs is None or any(self.matches(spec) for spec in specs) if self._deep_indexable: deep_mapped = self.clone(shared_data=False) if clone else self for k, v in self.items(): new_val = v.map(map_fn, specs, clone) if new_val is not None: deep_mapped[k] = new_val if applies: deep_mapped = map_fn(deep_mapped) return deep_mapped else: return map_fn(self) if applies else self def __getstate__(self): """ When pickling, make sure to save the relevant style and plotting options as well. """ obj_dict = self.__dict__.copy() try: if Store.save_option_state and (obj_dict.get('id', None) is not None): custom_key = '_custom_option_%d' % obj_dict['id'] if custom_key not in obj_dict: obj_dict[custom_key] = {backend:s[obj_dict['id']] for backend,s in Store._custom_options.items() if obj_dict['id'] in s} else: obj_dict['id'] = None except: self.warning("Could not pickle custom style information.") return obj_dict def __setstate__(self, d): """ When unpickled, restore the saved style and plotting options to ViewableElement.options. 
""" d = param_aliases(d) try: load_options = Store.load_counter_offset is not None if load_options: matches = [k for k in d if k.startswith('_custom_option')] for match in matches: custom_id = int(match.split('_')[-1]) if not isinstance(d[match], dict): # Backward compatibility before multiple backends backend_info = {'matplotlib':d[match]} else: backend_info = d[match] for backend, info in backend_info.items(): if backend not in Store._custom_options: Store._custom_options[backend] = {} Store._custom_options[backend][Store.load_counter_offset + custom_id] = info d.pop(match) if d['id'] is not None: d['id'] += Store.load_counter_offset else: d['id'] = None except: self.warning("Could not unpickle custom style information.") self.__dict__.update(d) class Dimensioned(LabelledData): """ Dimensioned is a base class that allows the data contents of a class to be associated with dimensions. The contents associated with dimensions may be partitioned into one of three types * key dimensions: These are the dimensions that can be indexed via the __getitem__ method. Dimension objects supporting key dimensions must support indexing over these dimensions and may also support slicing. This list ordering of dimensions describes the positional components of each multi-dimensional indexing operation. For instance, if the key dimension names are 'weight' followed by 'height' for Dimensioned object 'obj', then obj[80,175] indexes a weight of 80 and height of 175. Accessed using either kdims or key_dimensions. * value dimensions: These dimensions correspond to any data held on the Dimensioned object not in the key dimensions. Indexing by value dimension is supported by dimension name (when there are multiple possible value dimensions); no slicing semantics is supported and all the data associated with that dimension will be returned at once. Note that it is not possible to mix value dimensions and deep dimensions. Accessed using either vdims or value_dimensions. * deep dimensions: These are dynamically computed dimensions that belong to other Dimensioned objects that are nested in the data. Objects that support this should enable the _deep_indexable flag. Note that it is not possible to mix value dimensions and deep dimensions. Accessed using either ddims or deep_dimensions. Dimensioned class support generalized methods for finding the range and type of values along a particular Dimension. The range method relies on the appropriate implementation of the dimension_values methods on subclasses. The index of an arbitrary dimension is its positional index in the list of all dimensions, starting with the key dimensions, followed by the value dimensions and ending with the deep dimensions. """ cdims = param.Dict(default=OrderedDict(), doc=""" The constant dimensions defined as a dictionary of Dimension:value pairs providing additional dimension information about the object. Aliased with constant_dimensions.""") kdims = param.List(bounds=(0, None), constant=True, doc=""" The key dimensions defined as list of dimensions that may be used in indexing (and potential slicing) semantics. The order of the dimensions listed here determines the semantics of each component of a multi-dimensional indexing operation. Aliased with key_dimensions.""") vdims = param.List(bounds=(0, None), constant=True, doc=""" The value dimensions defined as the list of dimensions used to describe the components of the data. If multiple value dimensions are supplied, a particular value dimension may be indexed by name after the key dimensions. 
Aliased with value_dimensions.""") group = param.String(default='Dimensioned', constant=True, doc=""" A string describing the data wrapped by the object.""") __abstract = True _dim_groups = ['kdims', 'vdims', 'cdims', 'ddims'] _dim_aliases = dict(key_dimensions='kdims', value_dimensions='vdims', constant_dimensions='cdims', deep_dimensions='ddims') # Long-name aliases @property def key_dimensions(self): return self.kdims @property def value_dimensions(self): return self.vdims @property def constant_dimensions(self): return self.cdims @property def deep_dimensions(self): return self.ddims def __init__(self, data, **params): for group in self._dim_groups+list(self._dim_aliases.keys()): if group in ['deep_dimensions', 'ddims']: continue if group in params: if group in self._dim_aliases: params[self._dim_aliases[group]] = params.pop(group) group = self._dim_aliases[group] if group == 'cdims': dimensions = {d if isinstance(d, Dimension) else Dimension(d): val for d, val in params.pop(group).items()} else: dimensions = [d if isinstance(d, Dimension) else Dimension(d) for d in params.pop(group)] params[group] = dimensions super(Dimensioned, self).__init__(data, **params) self.ndims = len(self.kdims) cdims = [(d.name, val) for d, val in self.cdims.items()] self._cached_constants = OrderedDict(cdims) self._settings = None self.redim = redim(self) def _valid_dimensions(self, dimensions): """Validates key dimension input Returns kdims if no dimensions are specified""" if dimensions is None: dimensions = self.kdims elif not isinstance(dimensions, list): dimensions = [dimensions] valid_dimensions = [] for dim in dimensions: if isinstance(dim, Dimension): dim = dim.name if dim not in self.kdims: raise Exception("Supplied dimensions %s not found." % dim) valid_dimensions.append(dim) return valid_dimensions @property def ddims(self): "The list of deep dimensions" if self._deep_indexable and self: return self.values()[0].dimensions() else: return [] def dimensions(self, selection='all', label=False): """ Provides convenient access to Dimensions on nested Dimensioned objects. Dimensions can be selected by their type, i.e. 'key' or 'value' dimensions. By default 'all' dimensions are returned. """ if label in ['name', True]: label = 'short' elif label == 'label': label = 'long' elif label: raise ValueError("label needs to be one of True, False, 'name' or 'label'") lambdas = {'k': (lambda x: x.kdims, {'full_breadth': False}), 'v': (lambda x: x.vdims, {}), 'c': (lambda x: x.cdims, {})} aliases = {'key': 'k', 'value': 'v', 'constant': 'c'} if selection == 'all': groups = [d for d in self._dim_groups if d != 'cdims'] dims = [dim for group in groups for dim in getattr(self, group)] elif isinstance(selection, list): dims = [dim for group in selection for dim in getattr(self, '%sdims' % aliases.get(group))] elif aliases.get(selection) in lambdas: selection = aliases.get(selection, selection) lmbd, kwargs = lambdas[selection] key_traversal = self.traverse(lmbd, **kwargs) dims = [dim for keydims in key_traversal for dim in keydims] else: raise KeyError("Invalid selection %r, valid selections include" "'all', 'value' and 'key' dimensions" % repr(selection)) return [(dim.label if label == 'long' else dim.name) if label else dim for dim in dims] def get_dimension(self, dimension, default=None, strict=False): """ Access a Dimension object by name or index. Returns the default value if the dimension is not found and strict is False. If strict is True, a KeyError is raised instead. 
""" all_dims = self.dimensions() if isinstance(dimension, Dimension): dimension = dimension.name if isinstance(dimension, int): if 0 <= dimension < len(all_dims): return all_dims[dimension] elif strict: raise KeyError("Dimension %s not found" % dimension) else: return default name_map = {dim.name: dim for dim in all_dims} name_map.update({dim.label: dim for dim in all_dims}) name_map.update({dimension_sanitizer(dim.name): dim for dim in all_dims}) if strict and dimension not in name_map: raise KeyError("Dimension %s not found" % dimension) else: return name_map.get(dimension, default) def get_dimension_index(self, dim): """ Returns the index of the requested dimension. """ if isinstance(dim, Dimension): dim = dim.name if isinstance(dim, int): if (dim < (self.ndims + len(self.vdims)) or dim < len(self.dimensions())): return dim else: return IndexError('Dimension index out of bounds') try: dimensions = self.kdims+self.vdims return [i for i, d in enumerate(dimensions) if d == dim][0] except IndexError: raise Exception("Dimension %s not found in %s." % (dim, self.__class__.__name__)) def get_dimension_type(self, dim): """ Returns the specified Dimension type if specified or if the dimension_values types are consistent otherwise None is returned. """ dim_obj = self.get_dimension(dim) if dim_obj and dim_obj.type is not None: return dim_obj.type dim_vals = [type(v) for v in self.dimension_values(dim)] if len(set(dim_vals)) == 1: return dim_vals[0] else: return None def __getitem__(self, key): """ Multi-dimensional indexing semantics is determined by the list of key dimensions. For instance, the first indexing component will index the first key dimension. After the key dimensions are given, *either* a value dimension name may follow (if there are multiple value dimensions) *or* deep dimensions may then be listed (for applicable deep dimensions). """ return self def select(self, selection_specs=None, **kwargs): """ Allows slicing or indexing into the Dimensioned object by supplying the dimension and index/slice as key value pairs. Select descends recursively through the data structure applying the key dimension selection. The 'value' keyword allows selecting the value dimensions on objects which have any declared. The selection may also be selectively applied to specific objects by supplying the selection_specs as an iterable of type.group.label specs, types or functions. 
""" # Apply all indexes applying on this object vdims = self.vdims+['value'] if self.vdims else [] kdims = self.kdims local_kwargs = {k: v for k, v in kwargs.items() if k in kdims+vdims} # Check selection_spec applies if selection_specs is not None: if not isinstance(selection_specs, (list, tuple)): selection_specs = [selection_specs] matches = any(self.matches(spec) for spec in selection_specs) else: matches = True # Apply selection to self if local_kwargs and matches: ndims = self.ndims if any(d in self.vdims for d in kwargs): ndims = len(self.kdims+self.vdims) select = [slice(None) for _ in range(ndims)] for dim, val in local_kwargs.items(): if dim == 'value': select += [val] else: if isinstance(val, tuple): val = slice(*val) select[self.get_dimension_index(dim)] = val if self._deep_indexable: selection = self.get(tuple(select), None) if selection is None: selection = self.clone(shared_data=False) else: selection = self[tuple(select)] else: selection = self if not isinstance(selection, Dimensioned): return selection elif type(selection) is not type(self) and isinstance(selection, Dimensioned): # Apply the selection on the selected object of a different type dimensions = selection.dimensions() + ['value'] if any(kw in dimensions for kw in kwargs): selection = selection.select(selection_specs, **kwargs) elif isinstance(selection, Dimensioned) and selection._deep_indexable: # Apply the deep selection on each item in local selection items = [] for k, v in selection.items(): dimensions = v.dimensions() + ['value'] if any(kw in dimensions for kw in kwargs): items.append((k, v.select(selection_specs, **kwargs))) else: items.append((k, v)) selection = selection.clone(items) return selection def dimension_values(self, dimension, expanded=True, flat=True): """ Returns the values along the specified dimension. This method must be implemented for all Dimensioned type. """ val = self._cached_constants.get(dimension, None) if val: return np.array([val]) else: raise Exception("Dimension %s not found in %s." % (dimension, self.__class__.__name__)) def range(self, dimension, data_range=True): """ Returns the range of values along the specified dimension. If data_range is True, the data may be used to try and infer the appropriate range. Otherwise, (None,None) is returned to indicate that no range is defined. """ dimension = self.get_dimension(dimension) if dimension is None: return (None, None) if None not in dimension.range: return dimension.range elif data_range: if dimension in self.kdims+self.vdims: dim_vals = self.dimension_values(dimension.name) drange = find_range(dim_vals) else: dname = dimension.name match_fn = lambda x: dname in x.kdims + x.vdims range_fn = lambda x: x.range(dname) ranges = self.traverse(range_fn, [match_fn]) drange = max_range(ranges) soft_range = [r for r in dimension.soft_range if r is not None] if soft_range: drange = max_range([drange, soft_range]) else: drange = dimension.soft_range if dimension.range[0] is not None: return (dimension.range[0], drange[1]) elif dimension.range[1] is not None: return (drange[0], dimension.range[1]) else: return drange def __repr__(self): return PrettyPrinter.pprint(self) def __str__(self): return repr(self) def __unicode__(self): return unicode(PrettyPrinter.pprint(self)) def __call__(self, options=None, **kwargs): if config.warn_options_call: self.warning('Use of __call__ to set options will be deprecated ' 'in future. 
Use the equivalent opts method instead.') return self.opts(options, **kwargs) def opts(self, options=None, **kwargs): """ Apply the supplied options to a clone of the object which is then returned. Note that if no options are supplied at all, all ids are reset. """ from ..util.parser import OptsSpec if isinstance(options, basestring): try: options = OptsSpec.parse(options) except SyntaxError: options = OptsSpec.parse( '{clsname} {options}'.format(clsname=self.__class__.__name__, options=options)) groups = set(Store.options().groups.keys()) if kwargs and set(kwargs) <= groups: if not all(isinstance(v, dict) for v in kwargs.values()): raise Exception("The %s options must be specified using dictionary groups" % ','.join(repr(k) for k in kwargs.keys())) # Check whether the user is specifying targets (such as 'Image.Foo') entries = Store.options().children targets = [k.split('.')[0] in entries for grp in kwargs.values() for k in grp] if any(targets) and not all(targets): raise Exception("Cannot mix target specification keys such as 'Image' with non-target keywords.") elif not any(targets): # Not targets specified - add current object as target sanitized_group = group_sanitizer(self.group) if self.label: identifier = ('%s.%s.%s' % (self.__class__.__name__, sanitized_group, label_sanitizer(self.label))) elif sanitized_group != self.__class__.__name__: identifier = '%s.%s' % (self.__class__.__name__, sanitized_group) else: identifier = self.__class__.__name__ kwargs = {k:{identifier:v} for k,v in kwargs.items()} if options is None and kwargs=={}: deep_clone = self.map(lambda x: x.clone(id=None)) else: deep_clone = self.map(lambda x: x.clone(id=x.id)) StoreOptions.set_options(deep_clone, options, **kwargs) return deep_clone class ViewableElement(Dimensioned): """ A ViewableElement is a dimensioned datastructure that may be associated with a corresponding atomic visualization. An atomic visualization will display the data on a single set of axes (i.e. excludes multiple subplots that are displayed at once). The only new parameter introduced by ViewableElement is the title associated with the object for display. """ __abstract = True _auxiliary_component = False group = param.String(default='ViewableElement', constant=True)
1
18,659
I don't quite get why there would be key errors: ``deep_mapped`` is a clone of ``self``, and ``k`` comes from ``self.items()``, so why would the key ever be rejected?
holoviz-holoviews
py
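A minimal Python sketch of the pattern the reviewer is questioning (a toy container, not HoloViews internals): the keys written into the clone come straight from `self.items()`, so a plain mapping never raises KeyError on assignment; one could only appear if `__setitem__` itself validated keys.

class Container(dict):
    """Toy stand-in for a deep-indexable LabelledData object."""
    def clone(self, shared_data=True):
        # An empty clone, mirroring clone(shared_data=False) in map().
        return Container(self) if shared_data else Container()

src = Container(a=1, b=2)
deep_mapped = src.clone(shared_data=False)
for k, v in src.items():
    new_val = v * 10               # stand-in for v.map(map_fn, specs, clone)
    if new_val is not None:
        deep_mapped[k] = new_val   # k originates from src: a dict cannot reject it

print(deep_mapped)                 # {'a': 10, 'b': 20}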
@@ -427,6 +427,15 @@ func (te *transactorEndpoint) Withdraw(c *gin.Context) { } chainID := config.GetInt64(config.FlagChainID) + if req.ChainID != 0 { + if _, ok := registry.Chains()[req.ChainID]; !ok { + utils.SendError(resp, errors.New("Unsupported chain"), http.StatusBadRequest) + return + } + + chainID = req.ChainID + } + err = te.promiseSettler.Withdraw(chainID, identity.FromAddress(req.ProviderID), common.HexToAddress(req.HermesID), common.HexToAddress(req.Beneficiary)) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError)
1
/* * Copyright (C) 2019 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package endpoints import ( "encoding/json" "fmt" "math/big" "net/http" "github.com/gin-gonic/gin" "github.com/ethereum/go-ethereum/common" "github.com/mysteriumnetwork/node/config" "github.com/mysteriumnetwork/node/core/payout" "github.com/mysteriumnetwork/node/identity" "github.com/mysteriumnetwork/node/identity/registry" "github.com/mysteriumnetwork/node/session/pingpong" "github.com/mysteriumnetwork/node/tequilapi/contract" "github.com/mysteriumnetwork/node/tequilapi/utils" "github.com/pkg/errors" "github.com/rs/zerolog/log" "github.com/vcraescu/go-paginator/adapter" ) // Transactor represents interface to Transactor service type Transactor interface { FetchRegistrationFees(chainID int64) (registry.FeesResponse, error) FetchSettleFees(chainID int64) (registry.FeesResponse, error) FetchStakeDecreaseFee(chainID int64) (registry.FeesResponse, error) RegisterIdentity(id string, stake, fee *big.Int, beneficiary string, chainID int64, referralToken *string) error DecreaseStake(id string, chainID int64, amount, transactorFee *big.Int) error GetTokenReward(referralToken string) (registry.TokenRewardResponse, error) GetReferralToken(id common.Address) (string, error) ReferralTokenAvailable(id common.Address) error RegistrationTokenReward(token string) (*big.Int, error) } // promiseSettler settles the given promises type promiseSettler interface { ForceSettle(chainID int64, providerID identity.Identity, hermesID common.Address) error GetHermesFee(chainID int64, id common.Address) (uint16, error) SettleIntoStake(chainID int64, providerID identity.Identity, hermesID common.Address) error Withdraw(chainID int64, providerID identity.Identity, hermesID, beneficiary common.Address) error } type addressProvider interface { GetActiveHermes(chainID int64) (common.Address, error) } type settlementHistoryProvider interface { List(pingpong.SettlementHistoryFilter) ([]pingpong.SettlementHistoryEntry, error) } type transactorEndpoint struct { transactor Transactor identityRegistry identityRegistry promiseSettler promiseSettler settlementHistoryProvider settlementHistoryProvider addressProvider addressProvider addressStorage *payout.AddressStorage } // NewTransactorEndpoint creates and returns transactor endpoint func NewTransactorEndpoint( transactor Transactor, identityRegistry identityRegistry, promiseSettler promiseSettler, settlementHistoryProvider settlementHistoryProvider, addressProvider addressProvider, ) *transactorEndpoint { return &transactorEndpoint{ transactor: transactor, identityRegistry: identityRegistry, promiseSettler: promiseSettler, settlementHistoryProvider: settlementHistoryProvider, addressProvider: addressProvider, } } // swagger:operation GET /transactor/fees FeesDTO // --- // summary: Returns fees // description: Returns fees applied by Transactor // responses: // 200: // description: fees applied by 
Transactor // schema: // "$ref": "#/definitions/FeesDTO" // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) TransactorFees(c *gin.Context) { resp := c.Writer chainID := config.GetInt64(config.FlagChainID) registrationFees, err := te.transactor.FetchRegistrationFees(chainID) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } settlementFees, err := te.transactor.FetchSettleFees(chainID) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } decreaseStakeFees, err := te.transactor.FetchStakeDecreaseFee(chainID) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } hermes, err := te.addressProvider.GetActiveHermes(chainID) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } hermesFees, err := te.promiseSettler.GetHermesFee(chainID, hermes) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } f := contract.FeesDTO{ Registration: registrationFees.Fee, Settlement: settlementFees.Fee, Hermes: hermesFees, DecreaseStake: decreaseStakeFees.Fee, } utils.WriteAsJSON(f, resp) } // swagger:operation POST /transactor/settle/sync SettleSync // --- // summary: forces the settlement of promises for the given provider and hermes // description: Forces a settlement for the hermes promises and blocks until the settlement is complete. // parameters: // - in: body // name: body // description: settle request body // schema: // $ref: "#/definitions/SettleRequestDTO" // responses: // 202: // description: settle request accepted // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) SettleSync(c *gin.Context) { resp := c.Writer request := c.Request err := te.settle(request, te.promiseSettler.ForceSettle) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusOK) } // swagger:operation POST /transactor/settle/async SettleAsync // --- // summary: forces the settlement of promises for the given provider and hermes // description: Forces a settlement for the hermes promises. Does not wait for completion. 
// parameters: // - in: body // name: body // description: settle request body // schema: // $ref: "#/definitions/SettleRequestDTO" // responses: // 202: // description: settle request accepted // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) SettleAsync(c *gin.Context) { resp := c.Writer request := c.Request err := te.settle(request, func(chainID int64, provider identity.Identity, hermes common.Address) error { go func() { err := te.promiseSettler.ForceSettle(chainID, provider, hermes) if err != nil { log.Error().Err(err).Msgf("could not settle provider(%q) promises", provider.Address) } }() return nil }) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusAccepted) } func (te *transactorEndpoint) settle(request *http.Request, settler func(int64, identity.Identity, common.Address) error) error { req := contract.SettleRequest{} err := json.NewDecoder(request.Body).Decode(&req) if err != nil { return errors.Wrap(err, "failed to unmarshal settle request") } chainID := config.GetInt64(config.FlagChainID) return errors.Wrap(settler(chainID, identity.FromAddress(req.ProviderID), common.HexToAddress(req.HermesID)), "settling failed") } // swagger:operation POST /identities/{id}/register Identity RegisterIdentity // --- // summary: Registers identity // description: Registers identity on Mysterium Network smart contracts using Transactor // parameters: // - name: id // in: path // description: Identity address to register // type: string // required: true // - in: body // name: body // description: all body parameters a optional // schema: // $ref: "#/definitions/IdentityRegisterRequestDTO" // responses: // 200: // description: Payout info registered // 400: // description: Bad request // schema: // "$ref": "#/definitions/ErrorMessageDTO" // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) RegisterIdentity(c *gin.Context) { resp := c.Writer request := c.Request params := c.Params id := identity.FromAddress(params.ByName("id")) chainID := config.GetInt64(config.FlagChainID) req := &contract.IdentityRegisterRequest{} err := json.NewDecoder(request.Body).Decode(&req) if err != nil { utils.SendError(resp, errors.Wrap(err, "failed to parse identity registration request"), http.StatusBadRequest) return } registrationStatus, err := te.identityRegistry.GetRegistrationStatus(chainID, id) if err != nil { log.Err(err).Stack().Msgf("could not check registration status for ID: %s, %+v", id.Address, req) utils.SendError(resp, errors.Wrap(err, "could not check registration status"), http.StatusInternalServerError) return } switch registrationStatus { case registry.InProgress, registry.Registered: log.Info().Msgf("identity %q registration is in status %s, aborting...", id.Address, registrationStatus) utils.SendErrorMessage(resp, "Identity already registered", http.StatusConflict) return } regFee := big.NewInt(0) if req.ReferralToken == nil { rf, err := te.transactor.FetchRegistrationFees(chainID) if err != nil { utils.SendError(resp, fmt.Errorf("failed to get registration fees %w", err), http.StatusInternalServerError) return } regFee = rf.Fee } err = te.transactor.RegisterIdentity(id.Address, big.NewInt(0), regFee, "", chainID, req.ReferralToken) if err != nil { log.Err(err).Msgf("Failed identity registration request for ID: %s, %+v", id.Address, req) utils.SendError(resp, errors.Wrap(err, 
"failed identity registration request"), http.StatusInternalServerError) return } resp.WriteHeader(http.StatusAccepted) } // swagger:operation GET /settle/history settlementList // --- // summary: Returns settlement history // description: Returns settlement history // responses: // 200: // description: Returns settlement history // schema: // "$ref": "#/definitions/SettlementListResponse" // 400: // description: Bad request // schema: // "$ref": "#/definitions/ErrorMessageDTO" // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) SettlementHistory(c *gin.Context) { resp := c.Writer req := c.Request query := contract.NewSettlementListQuery() if errors := query.Bind(req); errors.HasErrors() { utils.SendValidationErrorMessage(resp, errors) return } settlementsAll, err := te.settlementHistoryProvider.List(query.ToFilter()) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } var settlements []pingpong.SettlementHistoryEntry p := utils.NewPaginator(adapter.NewSliceAdapter(settlementsAll), query.PageSize, query.PageSize) if err := p.Results(&settlements); err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } response := contract.NewSettlementListResponse(settlements, p) utils.WriteAsJSON(response, resp) } // swagger:operation POST /transactor/stake/decrease Decrease Stake // --- // summary: Decreases stake // description: Decreases stake on eth blockchain via the mysterium transactor. // parameters: // - in: body // name: body // description: decrease stake request // schema: // $ref: "#/definitions/DecreaseStakeRequest" // responses: // 200: // description: Payout info registered // 400: // description: Bad request // schema: // "$ref": "#/definitions/ErrorMessageDTO" // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) DecreaseStake(c *gin.Context) { resp := c.Writer request := c.Request var req contract.DecreaseStakeRequest err := json.NewDecoder(request.Body).Decode(&req) if err != nil { utils.SendError(resp, errors.Wrap(err, "failed to parse decrease stake"), http.StatusBadRequest) return } chainID := config.GetInt64(config.FlagChainID) fees, err := te.transactor.FetchStakeDecreaseFee(chainID) if err != nil { utils.SendError(resp, errors.Wrap(err, "failed get stake decrease fee"), http.StatusInternalServerError) return } err = te.transactor.DecreaseStake(req.ID, chainID, req.Amount, fees.Fee) if err != nil { log.Err(err).Msgf("Failed decreases stake request for ID: %s, %+v", req.ID, req) utils.SendError(resp, errors.Wrap(err, "failed decreases stake request"), http.StatusInternalServerError) return } resp.WriteHeader(http.StatusAccepted) } // swagger:operation POST /transactor/settle/withdraw Withdraw // --- // summary: Asks to perform withdrawal to l1. // description: Asks to perform withdrawal to l1. 
// parameters: // - in: body // name: body // description: withdraw request body // schema: // $ref: "#/definitions/WithdrawRequestDTO" // responses: // 202: // description: withdraw request accepted // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) Withdraw(c *gin.Context) { resp := c.Writer request := c.Request req := contract.WithdrawRequest{} err := json.NewDecoder(request.Body).Decode(&req) if err != nil { utils.SendError(resp, err, http.StatusBadRequest) return } chainID := config.GetInt64(config.FlagChainID) err = te.promiseSettler.Withdraw(chainID, identity.FromAddress(req.ProviderID), common.HexToAddress(req.HermesID), common.HexToAddress(req.Beneficiary)) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusOK) } // swagger:operation POST /transactor/stake/increase/sync StakeIncreaseSync // --- // summary: forces the settlement with stake increase of promises for the given provider and hermes. // description: Forces a settlement with stake increase for the hermes promises and blocks until the settlement is complete. // parameters: // - in: body // name: body // description: settle request body // schema: // $ref: "#/definitions/SettleRequestDTO" // responses: // 202: // description: settle request accepted // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) SettleIntoStakeSync(c *gin.Context) { resp := c.Writer request := c.Request err := te.settle(request, te.promiseSettler.SettleIntoStake) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusOK) } // swagger:operation POST /transactor/stake/increase/async StakeIncreaseAsync // --- // summary: forces the settlement with stake increase of promises for the given provider and hermes. // description: Forces a settlement with stake increase for the hermes promises and does not block. 
// parameters: // - in: body // name: body // description: settle request body // schema: // $ref: "#/definitions/SettleRequestDTO" // responses: // 202: // description: settle request accepted // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) SettleIntoStakeAsync(c *gin.Context) { resp := c.Writer request := c.Request err := te.settle(request, func(chainID int64, provider identity.Identity, hermes common.Address) error { go func() { err := te.promiseSettler.SettleIntoStake(chainID, provider, hermes) if err != nil { log.Error().Err(err).Msgf("could not settle into stake provider(%q) promises", provider.Address) } }() return nil }) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } resp.WriteHeader(http.StatusOK) } // swagger:operation POST /transactor/token/{token}/reward Reward // --- // summary: Returns the amount of reward for a token // parameters: // - in: path // name: token // description: Token for which to lookup the reward // type: string // required: true // responses: // 200: // description: Token Reward // schema: // "$ref": "#/definitions/TokenRewardAmount" // 500: // description: Internal server error // schema: // "$ref": "#/definitions/ErrorMessageDTO" func (te *transactorEndpoint) TokenRewardAmount(c *gin.Context) { resp := c.Writer params := c.Params token := params.ByName("token") reward, err := te.transactor.RegistrationTokenReward(token) if err != nil { utils.SendError(resp, err, http.StatusInternalServerError) return } if reward == nil { utils.SendError(resp, errors.New("no reward for token"), http.StatusInternalServerError) return } utils.WriteAsJSON(contract.TokenRewardAmount{ Amount: reward, }, resp) } // swagger:operation GET /transactor/chains Chains // --- // summary: Returns available chain map // responses: // 200: // description: Chain map func (te *transactorEndpoint) AvailableChains(c *gin.Context) { chains := registry.Chains() result := map[int64]string{} for _, id := range []int64{ config.FlagChain1ChainID.Value, config.FlagChain2ChainID.Value, } { if name, ok := chains[id]; ok { result[id] = name } } c.JSON(http.StatusOK, result) } // AddRoutesForTransactor attaches Transactor endpoints to router func AddRoutesForTransactor( identityRegistry identityRegistry, transactor Transactor, promiseSettler promiseSettler, settlementHistoryProvider settlementHistoryProvider, addressProvider addressProvider, ) func(*gin.Engine) error { te := NewTransactorEndpoint(transactor, identityRegistry, promiseSettler, settlementHistoryProvider, addressProvider) return func(e *gin.Engine) error { idGroup := e.Group("/identities") { idGroup.POST("/:id/register", te.RegisterIdentity) } transGroup := e.Group("/transactor") { transGroup.GET("/fees", te.TransactorFees) transGroup.POST("/settle/sync", te.SettleSync) transGroup.POST("/settle/async", te.SettleAsync) transGroup.GET("/settle/history", te.SettlementHistory) transGroup.POST("/stake/increase/sync", te.SettleIntoStakeSync) transGroup.POST("/stake/increase/async", te.SettleIntoStakeAsync) transGroup.POST("/stake/decrease", te.DecreaseStake) transGroup.POST("/settle/withdraw", te.Withdraw) transGroup.GET("/token/:token/reward", te.TokenRewardAmount) transGroup.GET("/chains", te.AvailableChains) } return nil } }
1
17,285
This chain ID only determines which chain to withdraw from, so your changes don't accomplish what you intend. You'll need changes to `func (aps *hermesPromiseSettler) Withdraw(chainID int64, providerID identity.Identity, hermesID, beneficiary common.Address) error`. The method probably has to take two chain IDs: to and from. Currently, it internally uses:
- `aps.config.L1ChainID` -> the chain to withdraw to
- `chainID` -> the chain ID passed in, i.e. the chain the withdrawal originates from.
(A minimal sketch of this signature change follows the record below.)
mysteriumnetwork-node
go
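The review above suggests splitting the single `chainID` into explicit source and destination chain IDs. Here is a minimal sketch of that shape, assuming hypothetical `fromChainID`/`toChainID` parameter names and stubbed stand-in types (the real `identity.Identity` and `common.Address` come from the node and go-ethereum packages); it is an illustration, not the project's actual API:

```go
package main

import "fmt"

// Stand-ins for identity.Identity and common.Address, so the sketch
// compiles on its own.
type Identity struct{ Address string }
type Address string

type hermesPromiseSettler struct{}

// Hypothetical Withdraw carrying both chain IDs explicitly: fromChainID is
// what the old `chainID` parameter meant (where the withdrawal originates),
// and toChainID replaces the internal aps.config.L1ChainID lookup.
func (aps *hermesPromiseSettler) Withdraw(fromChainID, toChainID int64, providerID Identity, hermesID, beneficiary Address) error {
	fmt.Printf("withdrawing for %s: from chain %d to chain %d\n", providerID.Address, fromChainID, toChainID)
	return nil // actual settlement logic elided
}

func main() {
	s := &hermesPromiseSettler{}
	_ = s.Withdraw(137, 1, Identity{Address: "0xprovider"}, Address("0xhermes"), Address("0xbeneficiary"))
}
```

Making the destination chain a parameter rather than a config lookup would also let the `/transactor/settle/withdraw` endpoint pass both values through from the request.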
@@ -57,7 +57,11 @@ func (TraceContext) Inject(ctx context.Context, supplier propagation.HTTPSupplie } func (tc TraceContext) Extract(ctx context.Context, supplier propagation.HTTPSupplier) context.Context { - return ContextWithRemoteSpanContext(ctx, tc.extract(supplier)) + sc := tc.extract(supplier) + if !sc.IsValid() { + return ctx + } + return ContextWithRemoteSpanContext(ctx, sc) } func (TraceContext) extract(supplier propagation.HTTPSupplier) core.SpanContext {
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package trace import ( "context" "encoding/hex" "fmt" "regexp" "strings" "go.opentelemetry.io/otel/api/core" "go.opentelemetry.io/otel/api/propagation" ) const ( supportedVersion = 0 maxVersion = 254 traceparentHeader = "Traceparent" ) // TraceContext propagates SpanContext in W3C TraceContext format. //nolint:golint type TraceContext struct{} var _ propagation.HTTPPropagator = TraceContext{} var traceCtxRegExp = regexp.MustCompile("^[0-9a-f]{2}-[a-f0-9]{32}-[a-f0-9]{16}-[a-f0-9]{2}-?") // DefaultHTTPPropagator returns the default trace HTTP propagator. func DefaultHTTPPropagator() propagation.HTTPPropagator { return TraceContext{} } func (TraceContext) Inject(ctx context.Context, supplier propagation.HTTPSupplier) { sc := SpanFromContext(ctx).SpanContext() if !sc.IsValid() { return } h := fmt.Sprintf("%.2x-%s-%s-%.2x", supportedVersion, sc.TraceID, sc.SpanID, sc.TraceFlags&core.TraceFlagsSampled) supplier.Set(traceparentHeader, h) } func (tc TraceContext) Extract(ctx context.Context, supplier propagation.HTTPSupplier) context.Context { return ContextWithRemoteSpanContext(ctx, tc.extract(supplier)) } func (TraceContext) extract(supplier propagation.HTTPSupplier) core.SpanContext { h := supplier.Get(traceparentHeader) if h == "" { return core.EmptySpanContext() } h = strings.Trim(h, "-") if !traceCtxRegExp.MatchString(h) { return core.EmptySpanContext() } sections := strings.Split(h, "-") if len(sections) < 4 { return core.EmptySpanContext() } if len(sections[0]) != 2 { return core.EmptySpanContext() } ver, err := hex.DecodeString(sections[0]) if err != nil { return core.EmptySpanContext() } version := int(ver[0]) if version > maxVersion { return core.EmptySpanContext() } if version == 0 && len(sections) != 4 { return core.EmptySpanContext() } if len(sections[1]) != 32 { return core.EmptySpanContext() } var sc core.SpanContext sc.TraceID, err = core.TraceIDFromHex(sections[1][:32]) if err != nil { return core.EmptySpanContext() } if len(sections[2]) != 16 { return core.EmptySpanContext() } sc.SpanID, err = core.SpanIDFromHex(sections[2]) if err != nil { return core.EmptySpanContext() } if len(sections[3]) != 2 { return core.EmptySpanContext() } opts, err := hex.DecodeString(sections[3]) if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { return core.EmptySpanContext() } sc.TraceFlags = opts[0] &^ core.TraceFlagsUnused if !sc.IsValid() { return core.EmptySpanContext() } return sc } func (TraceContext) GetAllKeys() []string { return []string{traceparentHeader} }
1
12,028
If instead we had `TraceContext.extract` return a bool as a second return value, we could avoid the byte-array comparison in `TraceID.IsValid`. Did you consider that alternative? (Sketched after this record.)
open-telemetry-opentelemetry-go
go
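The review above proposes a comma-ok return from `TraceContext.extract`, so `Extract` can branch on the bool instead of re-validating the trace/span ID byte arrays. A minimal sketch of that shape, with `core.SpanContext` and the HTTP supplier stubbed out for self-containment; the real parsing is as shown in the file above:

```go
package main

import (
	"context"
	"fmt"
)

// Stubbed-down stand-in for core.SpanContext.
type SpanContext struct {
	TraceID [16]byte
	SpanID  [8]byte
}

type TraceContext struct{}

type ctxKey struct{}

// extract reports validity through the second return value, so callers can
// branch on the bool rather than re-checking the byte arrays via IsValid.
func (TraceContext) extract(header string) (SpanContext, bool) {
	if header == "" {
		return SpanContext{}, false
	}
	var sc SpanContext
	// ... traceparent parsing elided; every failure path would return false ...
	return sc, true
}

func (tc TraceContext) Extract(ctx context.Context, header string) context.Context {
	sc, ok := tc.extract(header)
	if !ok {
		return ctx // no sc.IsValid() byte comparison needed here
	}
	// the real code would call ContextWithRemoteSpanContext(ctx, sc)
	return context.WithValue(ctx, ctxKey{}, sc)
}

func main() {
	ctx := TraceContext{}.Extract(context.Background(), "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
	fmt.Println(ctx.Value(ctxKey{}) != nil) // true
}
```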
@@ -22,8 +22,8 @@ #include "depotchest.h" #include "tools.h" -DepotChest::DepotChest(uint16_t type) : - Container(type), maxDepotItems(2000) {} +DepotChest::DepotChest(uint16_t type, bool paginated /*= true*/) : + Container(type, items[type].maxItems, true, paginated), maxDepotItems(2000) {} ReturnValue DepotChest::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature* actor/* = nullptr*/) const
1
/** * The Forgotten Server - a free and open-source MMORPG server emulator * Copyright (C) 2019 Mark Samman <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "otpch.h" #include "depotchest.h" #include "tools.h" DepotChest::DepotChest(uint16_t type) : Container(type), maxDepotItems(2000) {} ReturnValue DepotChest::queryAdd(int32_t index, const Thing& thing, uint32_t count, uint32_t flags, Creature* actor/* = nullptr*/) const { const Item* item = thing.getItem(); if (!item) { return RETURNVALUE_NOTPOSSIBLE; } bool skipLimit = hasBitSet(FLAG_NOLIMIT, flags); if (!skipLimit) { int32_t addCount = 0; if ((item->isStackable() && item->getItemCount() != count)) { addCount = 1; } if (item->getTopParent() != this) { if (const Container* container = item->getContainer()) { addCount = container->getItemHoldingCount() + 1; } else { addCount = 1; } } if (getItemHoldingCount() + addCount > maxDepotItems) { return RETURNVALUE_DEPOTISFULL; } } return Container::queryAdd(index, thing, count, flags, actor); } void DepotChest::postAddNotification(Thing* thing, const Cylinder* oldParent, int32_t index, cylinderlink_t) { Cylinder* parent = getParent(); if (parent) { parent->postAddNotification(thing, oldParent, index, LINK_PARENT); } } void DepotChest::postRemoveNotification(Thing* thing, const Cylinder* newParent, int32_t index, cylinderlink_t) { Cylinder* parent = getParent(); if (parent) { parent->postRemoveNotification(thing, newParent, index, LINK_PARENT); } } Cylinder* DepotChest::getParent() const { if (parent) { return parent->getParent(); } return nullptr; }
1
19,677
Here, shouldn't we use `depotFreeLimit`?
otland-forgottenserver
cpp
@@ -0,0 +1,3 @@ +# A secret token used to encrypt user_id's in the Bookmarks#export callback URL +# functionality, for example in Refworks export of Bookmarks. +Rails.application.config.blacklight_export_secret_token = '<%= SecureRandom.hex(64) %>'
1
1
5,242
Could we use the Rails application's secret token instead? Do we actually need our own here?
projectblacklight-blacklight
rb
@@ -80,6 +80,10 @@ class GroupByTest(ReusedSQLTestCase, TestUtils): self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False)) + self.assertRaises(ValueError, lambda: kdf.groupby('a', axis=1)) + self.assertRaises(ValueError, lambda: kdf.groupby('a', 'b')) + self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.a, kdf.b)) + # we can't use column name/names as a parameter `by` for `SeriesGroupBy`. self.assertRaises(KeyError, lambda: kdf.a.groupby(by='a')) self.assertRaises(KeyError, lambda: kdf.a.groupby(by=['a', 'b']))
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest import inspect from distutils.version import LooseVersion from itertools import product import numpy as np import pandas as pd from databricks import koalas as ks from databricks.koalas.config import option_context from databricks.koalas.exceptions import PandasNotImplementedError from databricks.koalas.missing.groupby import _MissingPandasLikeDataFrameGroupBy, \ _MissingPandasLikeSeriesGroupBy from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils from databricks.koalas.groupby import _is_multi_agg_with_relabel class GroupByTest(ReusedSQLTestCase, TestUtils): def test_groupby(self): pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7], 'b': [4, 2, 7, 3, 3, 1, 1, 1, 2], 'c': [4, 2, 7, 3, None, 1, 1, 1, 2], 'd': list('abcdefght')}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9]) kdf = ks.from_pandas(pdf) for as_index in [True, False]: if as_index: sort = lambda df: df.sort_index() else: sort = lambda df: df.sort_values('a').reset_index(drop=True) self.assert_eq(sort(kdf.groupby('a', as_index=as_index).sum()), sort(pdf.groupby('a', as_index=as_index).sum())) self.assert_eq(sort(kdf.groupby('a', as_index=as_index).b.sum()), sort(pdf.groupby('a', as_index=as_index).b.sum())) self.assert_eq(sort(kdf.groupby('a', as_index=as_index)['b'].sum()), sort(pdf.groupby('a', as_index=as_index)['b'].sum())) self.assert_eq(sort(kdf.groupby('a', as_index=as_index)[['b', 'c']].sum()), sort(pdf.groupby('a', as_index=as_index)[['b', 'c']].sum())) self.assert_eq(sort(kdf.groupby('a', as_index=as_index)[[]].sum()), sort(pdf.groupby('a', as_index=as_index)[[]].sum())) self.assert_eq(sort(kdf.groupby('a', as_index=as_index)['c'].sum()), sort(pdf.groupby('a', as_index=as_index)['c'].sum())) self.assert_eq(kdf.groupby('a').a.sum().sort_index(), pdf.groupby('a').a.sum().sort_index()) self.assert_eq(kdf.groupby('a')['a'].sum().sort_index(), pdf.groupby('a')['a'].sum().sort_index()) self.assert_eq(kdf.groupby('a')[['a']].sum().sort_index(), pdf.groupby('a')[['a']].sum().sort_index()) self.assert_eq(kdf.groupby('a')[['a', 'c']].sum().sort_index(), pdf.groupby('a')[['a', 'c']].sum().sort_index()) self.assert_eq(kdf.a.groupby(kdf.b).sum().sort_index(), pdf.a.groupby(pdf.b).sum().sort_index()) self.assertRaises(ValueError, lambda: kdf.groupby('a', as_index=False).a) self.assertRaises(ValueError, lambda: kdf.groupby('a', as_index=False)['a']) self.assertRaises(ValueError, lambda: kdf.groupby('a', as_index=False)[['a']]) self.assertRaises(ValueError, lambda: kdf.groupby('a', as_index=False)[['a', 'c']]) self.assertRaises(ValueError, lambda: kdf.groupby(0, as_index=False)[['a', 'c']]) self.assertRaises(KeyError, lambda: kdf.groupby([0], as_index=False)[['a', 'c']]) self.assertRaises(TypeError, lambda: kdf.a.groupby(kdf.b, as_index=False)) # we can't use column name/names as a parameter `by` for `SeriesGroupBy`. 
self.assertRaises(KeyError, lambda: kdf.a.groupby(by='a')) self.assertRaises(KeyError, lambda: kdf.a.groupby(by=['a', 'b'])) self.assertRaises(KeyError, lambda: kdf.a.groupby(by=('a', 'b'))) # we can't use DataFrame as a parameter `by` for `DataFrameGroupBy`/`SeriesGroupBy`. self.assertRaises(ValueError, lambda: kdf.groupby(kdf)) self.assertRaises(ValueError, lambda: kdf.a.groupby(kdf)) self.assertRaises(ValueError, lambda: kdf.a.groupby((kdf,))) def test_groupby_multiindex_columns(self): pdf = pd.DataFrame({('x', 'a'): [1, 2, 6, 4, 4, 6, 4, 3, 7], ('x', 'b'): [4, 2, 7, 3, 3, 1, 1, 1, 2], ('y', 'c'): [4, 2, 7, 3, None, 1, 1, 1, 2], ('z', 'd'): list('abcdefght')}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9]) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby(('x', 'a')).sum().sort_index(), pdf.groupby(('x', 'a')).sum().sort_index()) self.assert_eq(kdf.groupby(('x', 'a'), as_index=False).sum() .sort_values(('x', 'a')).reset_index(drop=True), pdf.groupby(('x', 'a'), as_index=False).sum() .sort_values(('x', 'a')).reset_index(drop=True)) self.assert_eq(kdf.groupby(('x', 'a'))[[('y', 'c')]].sum().sort_index(), pdf.groupby(('x', 'a'))[[('y', 'c')]].sum().sort_index()) self.assert_eq(kdf[('x', 'a')].groupby(kdf[('x', 'b')]).sum().sort_index(), pdf[('x', 'a')].groupby(pdf[('x', 'b')]).sum().sort_index()) def test_split_apply_combine_on_series(self): pdf = pd.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7], 'b': [4, 2, 7, 3, 3, 1, 1, 1, 2], 'c': [4, 2, 7, 3, None, 1, 1, 1, 2], 'd': list('abcdefght')}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9]) kdf = ks.from_pandas(pdf) funcs = [(False, ['sum', 'min', 'max', 'count', 'mean', 'first', 'last']), (True, ['var', 'std'])] funcs = [(almost, f) for almost, fs in funcs for f in fs] for ddkey, pdkey in [('b', 'b'), (kdf.b, pdf.b), (kdf.b + 1, pdf.b + 1)]: for almost, func in funcs: self.assert_eq(getattr(kdf.groupby(ddkey).a, func)().sort_index(), getattr(pdf.groupby(pdkey).a, func)().sort_index(), almost=almost) self.assert_eq(getattr(kdf.groupby(ddkey), func)().sort_index(), getattr(pdf.groupby(pdkey), func)().sort_index(), almost=almost) for ddkey, pdkey in [(kdf.b, pdf.b), (kdf.b + 1, pdf.b + 1)]: for almost, func in funcs: self.assert_eq(getattr(kdf.a.groupby(ddkey), func)().sort_index(), getattr(pdf.a.groupby(pdkey), func)().sort_index(), almost=almost) self.assert_eq(getattr((kdf.a + 1).groupby(ddkey), func)().sort_index(), getattr((pdf.a + 1).groupby(pdkey), func)().sort_index(), almost=almost) self.assert_eq(getattr((kdf.b + 1).groupby(ddkey), func)().sort_index(), getattr((pdf.b + 1).groupby(pdkey), func)().sort_index(), almost=almost) for i in [0, 4, 7]: for almost, func in funcs: self.assert_eq(getattr(kdf.groupby(kdf.b > i).a, func)().sort_index(), getattr(pdf.groupby(pdf.b > i).a, func)().sort_index(), almost=almost) self.assert_eq(getattr(kdf.groupby(kdf.b > i), func)().sort_index(), getattr(pdf.groupby(pdf.b > i), func)().sort_index(), almost=almost) def test_aggregate(self): pdf = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [1, 2, 3, 4], 'C': [0.362, 0.227, 1.267, -0.562]}) kdf = ks.from_pandas(pdf) for as_index in [True, False]: stats_kdf = kdf.groupby('A', as_index=as_index).agg({'B': 'min', 'C': 'sum'}) stats_pdf = pdf.groupby('A', as_index=as_index).agg({'B': 'min', 'C': 'sum'}) self.assert_eq(stats_kdf.sort_values(by=['B', 'C']).reset_index(drop=True), stats_pdf.sort_values(by=['B', 'C']).reset_index(drop=True)) stats_kdf = kdf.groupby('A', as_index=as_index).agg({'B': ['min', 'max'], 'C': 'sum'}) stats_pdf = pdf.groupby('A', as_index=as_index).agg({'B': ['min', 
'max'], 'C': 'sum'}) self.assert_eq( stats_kdf.sort_values( by=[('B', 'min'), ('B', 'max'), ('C', 'sum')] ).reset_index(drop=True), stats_pdf.sort_values( by=[('B', 'min'), ('B', 'max'), ('C', 'sum')] ).reset_index(drop=True)) expected_error_message = (r"aggs must be a dict mapping from column name \(string or " r"tuple\) to aggregate functions \(string or list of strings\).") with self.assertRaisesRegex(ValueError, expected_error_message): kdf.groupby('A', as_index=as_index).agg(0) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) pdf.columns = columns kdf.columns = columns for as_index in [True, False]: stats_kdf = kdf.groupby( ('X', 'A'), as_index=as_index).agg({('X', 'B'): 'min', ('Y', 'C'): 'sum'}) stats_pdf = pdf.groupby( ('X', 'A'), as_index=as_index).agg({('X', 'B'): 'min', ('Y', 'C'): 'sum'}) self.assert_eq( stats_kdf.sort_values(by=[('X', 'B'), ('Y', 'C')]).reset_index(drop=True), stats_pdf.sort_values(by=[('X', 'B'), ('Y', 'C')]).reset_index(drop=True)) stats_kdf = kdf.groupby( ('X', 'A')).agg({('X', 'B'): ['min', 'max'], ('Y', 'C'): 'sum'}) stats_pdf = pdf.groupby( ('X', 'A')).agg({('X', 'B'): ['min', 'max'], ('Y', 'C'): 'sum'}) self.assert_eq( stats_kdf.sort_values( by=[('X', 'B', 'min'), ('X', 'B', 'max'), ('Y', 'C', 'sum')] ).reset_index(drop=True), stats_pdf.sort_values( by=[('X', 'B', 'min'), ('X', 'B', 'max'), ('Y', 'C', 'sum')] ).reset_index(drop=True)) def test_aggregate_func_str_list(self): # this is test for cases where only string or list is assigned pdf = pd.DataFrame({'kind': ['cat', 'dog', 'cat', 'dog'], 'height': [9.1, 6.0, 9.5, 34.0], 'weight': [7.9, 7.5, 9.9, 198.0]} ) kdf = ks.from_pandas(pdf) agg_funcs = ['max', 'min', ['min', 'max']] for aggfunc in agg_funcs: # Since in koalas groupby, the order of rows might be different # so sort on index to ensure they have same output sorted_agg_kdf = kdf.groupby('kind').agg(aggfunc).sort_index() sorted_agg_pdf = pdf.groupby('kind').agg(aggfunc).sort_index() self.assert_eq(sorted_agg_kdf, sorted_agg_pdf) # test on multi index column case pdf = pd.DataFrame({'A': [1, 1, 2, 2], 'B': [1, 2, 3, 4], 'C': [0.362, 0.227, 1.267, -0.562]}) kdf = ks.from_pandas(pdf) columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C')]) pdf.columns = columns kdf.columns = columns for aggfunc in agg_funcs: sorted_agg_kdf = kdf.groupby(('X', 'A')).agg(aggfunc).sort_index() sorted_agg_pdf = pdf.groupby(('X', 'A')).agg(aggfunc).sort_index() self.assert_eq(sorted_agg_kdf, sorted_agg_pdf) @unittest.skipIf(pd.__version__ < "0.25.0", "not supported before pandas 0.25.0") def test_aggregate_relabel(self): # this is to test named aggregation in groupby pdf = pd.DataFrame({"group": ['a', 'a', 'b', 'b'], "A": [0, 1, 2, 3], "B": [5, 6, 7, 8]}) kdf = ks.from_pandas(pdf) # different agg column, same function agg_pdf = pdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index() agg_kdf = kdf.groupby("group").agg(a_max=("A", "max"), b_max=("B", "max")).sort_index() self.assert_eq(agg_pdf, agg_kdf) # same agg column, different functions agg_pdf = pdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index() agg_kdf = kdf.groupby("group").agg(b_max=("B", "max"), b_min=("B", "min")).sort_index() self.assert_eq(agg_pdf, agg_kdf) # test on NamedAgg agg_pdf = ( pdf.groupby("group") .agg(b_max=pd.NamedAgg(column="B", aggfunc="max")) .sort_index() ) agg_kdf = ( kdf.groupby("group") .agg(b_max=ks.NamedAgg(column="B", aggfunc="max")) .sort_index() ) self.assert_eq(agg_kdf, 
agg_pdf) # test on NamedAgg multi columns aggregation agg_pdf = ( pdf.groupby("group") .agg(b_max=pd.NamedAgg(column="B", aggfunc="max"), b_min=pd.NamedAgg(column="B", aggfunc="min")) .sort_index() ) agg_kdf = ( kdf.groupby("group") .agg(b_max=ks.NamedAgg(column="B", aggfunc="max"), b_min=ks.NamedAgg(column="B", aggfunc="min")) .sort_index() ) self.assert_eq(agg_kdf, agg_pdf) def test_describe(self): # support for numeric type, not support for string type yet datas = [] datas.append({"a": [1, 1, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) datas.append({"a": [-1, -1, -3], "b": [-4, -5, -6], "c": [-7, -8, -9]}) datas.append({"a": [0, 0, 0], "b": [0, 0, 0], "c": [0, 8, 0]}) # it is okay if string type column as a group key datas.append({"a": ['a', 'a', 'c'], "b": [4, 5, 6], "c": [7, 8, 9]}) for data in datas: pdf = pd.DataFrame(data) kdf = ks.from_pandas(pdf) describe_pdf = pdf.groupby("a").describe().sort_index() describe_kdf = kdf.groupby("a").describe().sort_index() # since the result of percentile columns are slightly difference from pandas, # we should check them separately: non-percentile columns & percentile columns # 1. Check that non-percentile columns are equal. agg_cols = [col.name for col in kdf.groupby("a")._agg_columns] formatted_percentiles = ["25%", "50%", "75%"] self.assert_eq(repr(describe_kdf.drop(list(product(agg_cols, formatted_percentiles)))), repr(describe_pdf.drop(columns=formatted_percentiles, level=1))) # 2. Check that percentile columns are equal. percentiles = [0.25, 0.5, 0.75] # The interpolation argument is yet to be implemented in Koalas. quantile_pdf = pdf.groupby("a").quantile(percentiles, interpolation="nearest") quantile_pdf = quantile_pdf.unstack(level=1).astype(float) non_percentile_stats = ["count", "mean", "std", "min", "max"] self.assert_eq(repr(describe_kdf.drop(list(product(agg_cols, non_percentile_stats)))), repr(quantile_pdf.rename(columns="{:.0%}".format, level=1))) # not support for string type yet datas = [] datas.append({"a": ['a', 'a', 'c'], "b": ['d', 'e', 'f'], "c": ['g', 'h', 'i']}) datas.append({"a": ['a', 'a', 'c'], "b": [4, 0, 1], "c": ['g', 'h', 'i']}) for data in datas: pdf = pd.DataFrame(data) kdf = ks.from_pandas(pdf) describe_pdf = pdf.groupby("a").describe().sort_index() self.assertRaises(NotImplementedError, lambda: kdf.groupby("a").describe().sort_index()) def test_all_any(self): pdf = pd.DataFrame({'A': [1, 1, 2, 2, 3, 3, 4, 4, 5, 5], 'B': [True, True, True, False, False, False, None, True, None, False]}) kdf = ks.from_pandas(pdf) for as_index in [True, False]: if as_index: sort = lambda df: df.sort_index() else: sort = lambda df: df.sort_values('A').reset_index(drop=True) self.assert_eq(sort(kdf.groupby('A', as_index=as_index).all()), sort(pdf.groupby('A', as_index=as_index).all())) self.assert_eq(sort(kdf.groupby('A', as_index=as_index).any()), sort(pdf.groupby('A', as_index=as_index).any())) self.assert_eq(sort(kdf.groupby('A', as_index=as_index).all()).B, sort(pdf.groupby('A', as_index=as_index).all()).B) self.assert_eq(sort(kdf.groupby('A', as_index=as_index).any()).B, sort(pdf.groupby('A', as_index=as_index).any()).B) self.assert_eq(kdf.B.groupby(kdf.A).all().sort_index(), pdf.B.groupby(pdf.A).all().sort_index()) self.assert_eq(kdf.B.groupby(kdf.A).any().sort_index(), pdf.B.groupby(pdf.A).any().sort_index()) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('Y', 'B')]) pdf.columns = columns kdf.columns = columns for as_index in [True, False]: if as_index: sort = lambda df: df.sort_index() else: sort = lambda 
df: df.sort_values(('X', 'A')).reset_index(drop=True) self.assert_eq(sort(kdf.groupby(('X', 'A'), as_index=as_index).all()), sort(pdf.groupby(('X', 'A'), as_index=as_index).all())) self.assert_eq(sort(kdf.groupby(('X', 'A'), as_index=as_index).any()), sort(pdf.groupby(('X', 'A'), as_index=as_index).any())) def test_raises(self): kdf = ks.DataFrame({'a': [1, 2, 6, 4, 4, 6, 4, 3, 7], 'b': [4, 2, 7, 3, 3, 1, 1, 1, 2]}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9]) # test raises with incorrect key self.assertRaises(ValueError, lambda: kdf.groupby([])) self.assertRaises(KeyError, lambda: kdf.groupby('x')) self.assertRaises(KeyError, lambda: kdf.groupby(['a', 'x'])) self.assertRaises(KeyError, lambda: kdf.groupby('a')['x']) self.assertRaises(KeyError, lambda: kdf.groupby('a')['b', 'x']) self.assertRaises(KeyError, lambda: kdf.groupby('a')[['b', 'x']]) def test_nunique(self): pdf = pd.DataFrame({'a': [1, 1, 1, 1, 1, 0, 0, 0, 0, 0], 'b': [2, 2, 2, 3, 3, 4, 4, 5, 5, 5]}) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("a").agg({"b": "nunique"}).sort_index(), pdf.groupby("a").agg({"b": "nunique"}).sort_index()) self.assert_eq(kdf.groupby("a").nunique().sort_index(), pdf.groupby("a").nunique().sort_index()) self.assert_eq(kdf.groupby("a").nunique(dropna=False).sort_index(), pdf.groupby("a").nunique(dropna=False).sort_index()) self.assert_eq(kdf.groupby("a")['b'].nunique().sort_index(), pdf.groupby("a")['b'].nunique().sort_index()) self.assert_eq(kdf.groupby("a")['b'].nunique(dropna=False).sort_index(), pdf.groupby("a")['b'].nunique(dropna=False).sort_index()) nunique_kdf = kdf.groupby("a", as_index=False).agg({"b": "nunique"}) nunique_pdf = pdf.groupby("a", as_index=False).agg({"b": "nunique"}) self.assert_eq( nunique_kdf.sort_values(['a', 'b']).reset_index(drop=True), nunique_pdf.sort_values(['a', 'b']).reset_index(drop=True)) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('y', 'b')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "a")).nunique().sort_index(), pdf.groupby(("x", "a")).nunique().sort_index()) self.assert_eq(kdf.groupby(("x", "a")).nunique(dropna=False).sort_index(), pdf.groupby(("x", "a")).nunique(dropna=False).sort_index()) def test_value_counts(self): pdf = pd.DataFrame({'A': [1, 2, 2, 3, 3, 3], 'B': [1, 1, 2, 3, 3, 3]}, columns=['A', 'B']) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.groupby("A")['B'].value_counts().sort_index()), repr(pdf.groupby("A")['B'].value_counts().sort_index())) self.assert_eq(repr(kdf.groupby("A")['B'] .value_counts(sort=True, ascending=False).sort_index()), repr(pdf.groupby("A")['B'] .value_counts(sort=True, ascending=False).sort_index())) self.assert_eq(repr(kdf.groupby("A")['B'] .value_counts(sort=True, ascending=True).sort_index()), repr(pdf.groupby("A")['B'] .value_counts(sort=True, ascending=True).sort_index())) def test_size(self): pdf = pd.DataFrame({'A': [1, 2, 2, 3, 3, 3], 'B': [1, 1, 2, 3, 3, 3]}) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("A").size().sort_index(), pdf.groupby("A").size().sort_index()) self.assert_eq(kdf.groupby("A")['B'].size().sort_index(), pdf.groupby("A")['B'].size().sort_index()) self.assert_eq(kdf.groupby(['A', 'B']).size().sort_index(), pdf.groupby(['A', 'B']).size().sort_index()) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('Y', 'B')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("X", "A")).size().sort_index(), pdf.groupby(("X", "A")).size().sort_index()) self.assert_eq(kdf.groupby([('X', 'A'), ('Y', 
'B')]).size().sort_index(), pdf.groupby([('X', 'A'), ('Y', 'B')]).size().sort_index()) def test_diff(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").diff().sort_index(), pdf.groupby("b").diff().sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).diff().sort_index(), pdf.groupby(['a', 'b']).diff().sort_index()) self.assert_eq(kdf.groupby(['b'])['a'].diff().sort_index(), pdf.groupby(['b'])['a'].diff().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'b']].diff().sort_index(), pdf.groupby(['b'])[['a', 'b']].diff().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).diff().sort_index(), pdf.groupby(("x", "b")).diff().sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).diff().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).diff().sort_index()) def test_rank(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").rank().sort_index(), pdf.groupby("b").rank().sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).rank().sort_index(), pdf.groupby(['a', 'b']).rank().sort_index()) self.assert_eq(kdf.groupby(['b'])['a'].rank().sort_index(), pdf.groupby(['b'])['a'].rank().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].rank().sort_index(), pdf.groupby(['b'])[['a', 'c']].rank().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).rank().sort_index(), pdf.groupby(("x", "b")).rank().sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).rank().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).rank().sort_index()) def test_cummin(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").cummin().sort_index(), pdf.groupby("b").cummin().sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).cummin().sort_index(), pdf.groupby(['a', 'b']).cummin().sort_index()) self.assert_eq(kdf.groupby(['b'])['a'].cummin().sort_index(), pdf.groupby(['b'])['a'].cummin().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].cummin().sort_index(), pdf.groupby(['b'])[['a', 'c']].cummin().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).cummin().sort_index(), pdf.groupby(("x", "b")).cummin().sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).cummin().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).cummin().sort_index()) def test_cummax(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").cummax().sort_index(), pdf.groupby("b").cummax().sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).cummax().sort_index(), pdf.groupby(['a', 'b']).cummax().sort_index()) 
self.assert_eq(kdf.groupby(['b'])['a'].cummax().sort_index(), pdf.groupby(['b'])['a'].cummax().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].cummax().sort_index(), pdf.groupby(['b'])[['a', 'c']].cummax().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).cummax().sort_index(), pdf.groupby(("x", "b")).cummax().sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).cummax().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).cummax().sort_index()) def test_cumsum(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").cumsum().sort_index(), pdf.groupby("b").cumsum().sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).cumsum().sort_index(), pdf.groupby(['a', 'b']).cumsum().sort_index()) self.assert_eq(kdf.groupby(['b'])['a'].cumsum().sort_index(), pdf.groupby(['b'])['a'].cumsum().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].cumsum().sort_index(), pdf.groupby(['b'])[['a', 'c']].cumsum().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).cumsum().sort_index(), pdf.groupby(("x", "b")).cumsum().sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).cumsum().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).cumsum().sort_index()) def test_cumprod(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 3, 'b': [1, 1, 2, 3, 5, 8] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").cumprod().sort_index(), pdf.groupby("b").cumprod().sort_index(), almost=True) self.assert_eq(kdf.groupby(['a', 'b']).cumprod().sort_index(), pdf.groupby(['a', 'b']).cumprod().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])['a'].cumprod().sort_index(), pdf.groupby(['b'])['a'].cumprod().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].cumprod().sort_index(), pdf.groupby(['b'])[['a', 'c']].cumprod().sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).cumprod().sort_index(), pdf.groupby(("x", "b")).cumprod().sort_index(), almost=True) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).cumprod().sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).cumprod().sort_index(), almost=True) def test_nsmallest(self): pdf = pd.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3, 'b': [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3, 'c': [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3, 'd': [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3}, index=np.random.rand(9 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.groupby(['a'])['b'].nsmallest(1).sort_values()), repr(pdf.groupby(['a'])['b'].nsmallest(1).sort_values())) self.assert_eq(repr(kdf.groupby(['a'])['b'].nsmallest(2).sort_index()), repr(pdf.groupby(['a'])['b'].nsmallest(2).sort_index())) with self.assertRaisesRegex(ValueError, "nsmallest do not support multi-index now"): kdf.set_index(['a', 'b']).groupby(['c'])['d'].nsmallest(1) def test_nlargest(self): pdf = pd.DataFrame({'a': [1, 1, 1, 2, 2, 2, 3, 3, 3] * 3, 'b': [1, 2, 2, 2, 3, 3, 3, 4, 
4] * 3, 'c': [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3, 'd': [1, 2, 2, 2, 3, 3, 3, 4, 4] * 3}, index=np.random.rand(9 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(repr(kdf.groupby(['a'])['b'].nlargest(1).sort_values()), repr(pdf.groupby(['a'])['b'].nlargest(1).sort_values())) self.assert_eq(repr(kdf.groupby(['a'])['b'].nlargest(2).sort_index()), repr(pdf.groupby(['a'])['b'].nlargest(2).sort_index())) with self.assertRaisesRegex(ValueError, "nlargest do not support multi-index now"): kdf.set_index(['a', 'b']).groupby(['c'])['d'].nlargest(1) def test_fillna(self): pdf = pd.DataFrame({'A': [1, 1, 2, 2] * 3, 'B': [2, 4, None, 3] * 3, 'C': [None, None, None, 1] * 3, 'D': [0, 1, 5, 4] * 3}, index=np.random.rand(4 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("A").fillna(0).sort_index(), pdf.groupby("A").fillna(0).sort_index()) self.assert_eq(kdf.groupby("A").fillna(method='bfill').sort_index(), pdf.groupby("A").fillna(method='bfill').sort_index()) self.assert_eq(kdf.groupby("A").fillna(method='ffill').sort_index(), pdf.groupby("A").fillna(method='ffill').sort_index()) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Z', 'D')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("X", "A")).fillna(0).sort_index(), pdf.groupby(("X", "A")).fillna(0).sort_index()) self.assert_eq(kdf.groupby(("X", "A")).fillna(method='bfill').sort_index(), pdf.groupby(("X", "A")).fillna(method='bfill').sort_index()) self.assert_eq(kdf.groupby(("X", "A")).fillna(method='ffill').sort_index(), pdf.groupby(("X", "A")).fillna(method='ffill').sort_index()) def test_ffill(self): pdf = pd.DataFrame({'A': [1, 1, 2, 2] * 3, 'B': [2, 4, None, 3] * 3, 'C': [None, None, None, 1] * 3, 'D': [0, 1, 5, 4] * 3}, index=np.random.rand(4 * 3)) kdf = ks.from_pandas(pdf) if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"): self.assert_eq(kdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index().drop('A', 1)) else: self.assert_eq(kdf.groupby("A").ffill().sort_index(), pdf.groupby("A").ffill().sort_index()) self.assert_eq(repr(kdf.groupby("A")['B'].ffill().sort_index()), repr(pdf.groupby("A")['B'].ffill().sort_index())) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Z', 'D')]) pdf.columns = columns kdf.columns = columns if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"): self.assert_eq(kdf.groupby(("X", "A")).ffill().sort_index(), pdf.groupby(("X", "A")).ffill().sort_index().drop(('X', 'A'), 1)) else: self.assert_eq(kdf.groupby(("X", "A")).ffill().sort_index(), pdf.groupby(("X", "A")).ffill().sort_index()) def test_bfill(self): pdf = pd.DataFrame({'A': [1, 1, 2, 2] * 3, 'B': [2, 4, None, 3] * 3, 'C': [None, None, None, 1] * 3, 'D': [0, 1, 5, 4] * 3}, index=np.random.rand(4 * 3)) kdf = ks.from_pandas(pdf) if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"): self.assert_eq(kdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index().drop('A', 1)) else: self.assert_eq(kdf.groupby("A").bfill().sort_index(), pdf.groupby("A").bfill().sort_index()) self.assert_eq(repr(kdf.groupby("A")['B'].bfill().sort_index()), repr(pdf.groupby("A")['B'].bfill().sort_index())) # multi-index columns columns = pd.MultiIndex.from_tuples([('X', 'A'), ('X', 'B'), ('Y', 'C'), ('Z', 'D')]) pdf.columns = columns kdf.columns = columns if LooseVersion(pd.__version__) <= LooseVersion("0.24.2"): self.assert_eq(kdf.groupby(("X", "A")).bfill().sort_index(), pdf.groupby(("X", 
"A")).bfill().sort_index().drop(('X', 'A'), 1)) else: self.assert_eq(kdf.groupby(("X", "A")).bfill().sort_index(), pdf.groupby(("X", "A")).bfill().sort_index()) @unittest.skipIf(pd.__version__ < '0.24.0', "not supported before pandas 0.24.0") def test_shift(self): pdf = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3] * 3, 'b': [1, 1, 2, 2, 3, 4] * 3, 'c': [1, 4, 9, 16, 25, 36] * 3}, index=np.random.rand(6 * 3)) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby('a').shift().sort_index(), pdf.groupby('a').shift().sort_index()) # TODO: seems like a pandas' bug when fill_value is not None? # self.assert_eq(kdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index(), # pdf.groupby(['a', 'b']).shift(periods=-1, fill_value=0).sort_index()) self.assert_eq(kdf.groupby(['b'])['a'].shift().sort_index(), pdf.groupby(['b'])['a'].shift().sort_index(), almost=True) self.assert_eq(kdf.groupby(['a', 'b'])['c'].shift().sort_index(), pdf.groupby(['a', 'b'])['c'].shift().sort_index(), almost=True) self.assert_eq(kdf.groupby(['b'])[['a', 'c']].shift(periods=-1, fill_value=0).sort_index(), pdf.groupby(['b'])[['a', 'c']].shift(periods=-1, fill_value=0).sort_index(), almost=True) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(('x', 'a')).shift().sort_index(), pdf.groupby(('x', 'a')).shift().sort_index()) # TODO: seems like a pandas' bug when fill_value is not None? # self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1, # fill_value=0).sort_index(), # pdf.groupby([('x', 'a'), ('x', 'b')]).shift(periods=-1, # fill_value=0).sort_index()) def test_apply(self): pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6], 'b': [1, 1, 2, 3, 5, 8], 'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c']) kdf = ks.from_pandas(pdf) self.assert_eq(kdf.groupby("b").apply(lambda x: x + 1).sort_index(), pdf.groupby("b").apply(lambda x: x + 1).sort_index()) self.assert_eq(kdf.groupby(['a', 'b']).apply(lambda x: x * x).sort_index(), pdf.groupby(['a', 'b']).apply(lambda x: x * x).sort_index()) self.assert_eq(kdf.groupby(['b'])['c'].apply(lambda x: x).sort_index(), pdf.groupby(['b'])['c'].apply(lambda x: x).sort_index()) with self.assertRaisesRegex(TypeError, "<class 'int'> object is not callable"): kdf.groupby("b").apply(1) # multi-index columns columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')]) pdf.columns = columns kdf.columns = columns self.assert_eq(kdf.groupby(("x", "b")).apply(lambda x: x + 1).sort_index(), pdf.groupby(("x", "b")).apply(lambda x: x + 1).sort_index()) self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')]).apply(lambda x: x * x).sort_index(), pdf.groupby([('x', 'a'), ('x', 'b')]).apply(lambda x: x * x).sort_index()) def test_apply_with_new_dataframe(self): pdf = pd.DataFrame({ "timestamp": [0.0, 0.5, 1.0, 0.0, 0.5], "car_id": ['A', 'A', 'A', 'B', 'B'] }) kdf = ks.from_pandas(pdf) self.assert_eq( kdf.groupby('car_id').apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(), pdf.groupby('car_id').apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index()) self.assert_eq( kdf.groupby('car_id') .apply(lambda df: pd.DataFrame({'mean': [df['timestamp'].mean()]})).sort_index(), pdf.groupby('car_id') .apply(lambda df: pd.DataFrame({"mean": [df['timestamp'].mean()]})).sort_index()) # dataframe with 1000+ records pdf = pd.DataFrame({ "timestamp": [0.0, 0.5, 1.0, 0.0, 0.5] * 300, "car_id": ['A', 'A', 'A', 'B', 'B'] * 300 }) kdf = ks.from_pandas(pdf) self.assert_eq( 
            kdf.groupby('car_id').apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index(),
            pdf.groupby('car_id').apply(lambda _: pd.DataFrame({"column": [0.0]})).sort_index())
        self.assert_eq(
            kdf.groupby('car_id')
            .apply(lambda df: pd.DataFrame({"mean": [df['timestamp'].mean()]})).sort_index(),
            pdf.groupby('car_id')
            .apply(lambda df: pd.DataFrame({"mean": [df['timestamp'].mean()]})).sort_index())

    def test_transform(self):
        pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                            'b': [1, 1, 2, 3, 5, 8],
                            'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.groupby("b").transform(lambda x: x + 1).sort_index(),
                       pdf.groupby("b").transform(lambda x: x + 1).sort_index())
        self.assert_eq(kdf.groupby(['a', 'b']).transform(lambda x: x * x).sort_index(),
                       pdf.groupby(['a', 'b']).transform(lambda x: x * x).sort_index())
        self.assert_eq(kdf.groupby(['b'])['c'].transform(lambda x: x).sort_index(),
                       pdf.groupby(['b'])['c'].transform(lambda x: x).sort_index())

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(kdf.groupby(("x", "b")).transform(lambda x: x + 1).sort_index(),
                       pdf.groupby(("x", "b")).transform(lambda x: x + 1).sort_index())
        self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')])
                       .transform(lambda x: x * x).sort_index(),
                       pdf.groupby([('x', 'a'), ('x', 'b')])
                       .transform(lambda x: x * x).sort_index())

        with option_context('compute.shortcut_limit', 1000):
            pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6] * 300,
                                'b': [1, 1, 2, 3, 5, 8] * 300,
                                'c': [1, 4, 9, 16, 25, 36] * 300}, columns=['a', 'b', 'c'])
            kdf = ks.from_pandas(pdf)

            self.assert_eq(kdf.groupby("b").transform(lambda x: x + 1).sort_index(),
                           pdf.groupby("b").transform(lambda x: x + 1).sort_index())
            self.assert_eq(kdf.groupby(['a', 'b']).transform(lambda x: x * x).sort_index(),
                           pdf.groupby(['a', 'b']).transform(lambda x: x * x).sort_index())
            self.assert_eq(kdf.groupby(['b'])['a'].transform(lambda x: x).sort_index(),
                           pdf.groupby(['b'])['a'].transform(lambda x: x).sort_index())

        with self.assertRaisesRegex(TypeError, "<class 'int'> object is not callable"):
            kdf.groupby("b").transform(1)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(kdf.groupby(("x", "b")).transform(lambda x: x + 1).sort_index(),
                       pdf.groupby(("x", "b")).transform(lambda x: x + 1).sort_index())
        self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')])
                       .transform(lambda x: x * x).sort_index(),
                       pdf.groupby([('x', 'a'), ('x', 'b')])
                       .transform(lambda x: x * x).sort_index())

    def test_filter(self):
        pdf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
                            'b': [1, 1, 2, 3, 5, 8],
                            'c': [1, 4, 9, 16, 25, 36]}, columns=['a', 'b', 'c'])
        kdf = ks.from_pandas(pdf)

        self.assert_eq(kdf.groupby("b").filter(lambda x: x.b.mean() < 4).sort_index(),
                       pdf.groupby("b").filter(lambda x: x.b.mean() < 4).sort_index())
        self.assert_eq(kdf.groupby(['a', 'b']).filter(lambda x: any(x.a == 2)).sort_index(),
                       pdf.groupby(['a', 'b']).filter(lambda x: any(x.a == 2)).sort_index())

        with self.assertRaisesRegex(TypeError, "<class 'int'> object is not callable"):
            kdf.groupby("b").filter(1)

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(kdf.groupby(("x", "b"))
                       .filter(lambda x: x[('x', 'b')].mean() < 4).sort_index(),
                       pdf.groupby(("x", "b"))
                       .filter(lambda x: x[('x', 'b')].mean() < 4).sort_index())
        self.assert_eq(kdf.groupby([('x', 'a'), ('x', 'b')])
                       .filter(lambda x: any(x[('x', 'a')] == 2)).sort_index(),
                       pdf.groupby([('x', 'a'), ('x', 'b')])
                       .filter(lambda x: any(x[('x', 'a')] == 2)).sort_index())

    def test_idxmax(self):
        pdf = pd.DataFrame({'a': [1, 1, 2, 2, 3] * 3,
                            'b': [1, 2, 3, 4, 5] * 3,
                            'c': [5, 4, 3, 2, 1] * 3}, index=np.random.rand(5 * 3))
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.groupby(['a']).idxmax().sort_index(),
                       kdf.groupby(['a']).idxmax().sort_index())
        self.assert_eq(pdf.groupby(['a']).idxmax(skipna=False).sort_index(),
                       kdf.groupby(['a']).idxmax(skipna=False).sort_index())

        with self.assertRaisesRegex(ValueError, 'idxmax only support one-level index now'):
            kdf.set_index(['a', 'b']).groupby(['c']).idxmax()

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(pdf.groupby(('x', 'a')).idxmax().sort_index(),
                       kdf.groupby(('x', 'a')).idxmax().sort_index())
        self.assert_eq(pdf.groupby(('x', 'a')).idxmax(skipna=False).sort_index(),
                       kdf.groupby(('x', 'a')).idxmax(skipna=False).sort_index())

    def test_idxmin(self):
        pdf = pd.DataFrame({'a': [1, 1, 2, 2, 3] * 3,
                            'b': [1, 2, 3, 4, 5] * 3,
                            'c': [5, 4, 3, 2, 1] * 3}, index=np.random.rand(5 * 3))
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.groupby(['a']).idxmin().sort_index(),
                       kdf.groupby(['a']).idxmin().sort_index())
        self.assert_eq(pdf.groupby(['a']).idxmin(skipna=False).sort_index(),
                       kdf.groupby(['a']).idxmin(skipna=False).sort_index())

        with self.assertRaisesRegex(ValueError, 'idxmin only support one-level index now'):
            kdf.set_index(['a', 'b']).groupby(['c']).idxmin()

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(pdf.groupby(('x', 'a')).idxmin().sort_index(),
                       kdf.groupby(('x', 'a')).idxmin().sort_index())
        self.assert_eq(pdf.groupby(('x', 'a')).idxmin(skipna=False).sort_index(),
                       kdf.groupby(('x', 'a')).idxmin(skipna=False).sort_index())

    def test_head(self):
        pdf = pd.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3] * 3,
                            'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5] * 3,
                            'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6] * 3},
                           index=np.random.rand(10 * 3))
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.groupby('a').head(2).sort_index(),
                       kdf.groupby('a').head(2).sort_index())
        self.assert_eq(pdf.groupby('a').head(-2).sort_index(),
                       kdf.groupby('a').head(-2).sort_index())
        self.assert_eq(pdf.groupby('a').head(100000).sort_index(),
                       kdf.groupby('a').head(100000).sort_index())

        self.assert_eq(pdf.groupby('a')['b'].head(2).sort_index(),
                       kdf.groupby('a')['b'].head(2).sort_index())
        self.assert_eq(pdf.groupby('a')['b'].head(-2).sort_index(),
                       kdf.groupby('a')['b'].head(-2).sort_index())
        self.assert_eq(pdf.groupby('a')['b'].head(100000).sort_index(),
                       kdf.groupby('a')['b'].head(100000).sort_index())

        # multi-index
        midx = pd.MultiIndex([['x', 'y'],
                              ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']],
                             [[0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
                              [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
        pdf = pd.DataFrame({'a': [1, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                            'b': [2, 3, 1, 4, 6, 9, 8, 10, 7, 5],
                            'c': [3, 5, 2, 5, 1, 2, 6, 4, 3, 6]},
                           columns=['a', 'b', 'c'], index=midx)
        kdf = ks.from_pandas(pdf)

        self.assert_eq(pdf.groupby('a').head(2).sort_index(),
                       kdf.groupby('a').head(2).sort_index())
        self.assert_eq(pdf.groupby('a').head(-2).sort_index(),
                       kdf.groupby('a').head(-2).sort_index())
        self.assert_eq(pdf.groupby('a').head(100000).sort_index(),
                       kdf.groupby('a').head(100000).sort_index())
        self.assert_eq(pdf.groupby('a')['b'].head(2).sort_index(),
                       kdf.groupby('a')['b'].head(2).sort_index())
        self.assert_eq(pdf.groupby('a')['b'].head(-2).sort_index(),
                       kdf.groupby('a')['b'].head(-2).sort_index())
        self.assert_eq(pdf.groupby('a')['b'].head(100000).sort_index(),
                       kdf.groupby('a')['b'].head(100000).sort_index())

        # multi-index columns
        columns = pd.MultiIndex.from_tuples([('x', 'a'), ('x', 'b'), ('y', 'c')])
        pdf.columns = columns
        kdf.columns = columns

        self.assert_eq(pdf.groupby(('x', 'a')).head(2).sort_index(),
                       kdf.groupby(('x', 'a')).head(2).sort_index())
        self.assert_eq(pdf.groupby(('x', 'a')).head(-2).sort_index(),
                       kdf.groupby(('x', 'a')).head(-2).sort_index())
        self.assert_eq(pdf.groupby(('x', 'a')).head(100000).sort_index(),
                       kdf.groupby(('x', 'a')).head(100000).sort_index())

    def test_missing(self):
        kdf = ks.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9]})

        # DataFrameGroupBy functions
        missing_functions = inspect.getmembers(_MissingPandasLikeDataFrameGroupBy,
                                               inspect.isfunction)
        unsupported_functions = [name for (name, type_) in missing_functions
                                 if type_.__name__ == 'unsupported_function']
        for name in unsupported_functions:
            with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    "method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
                getattr(kdf.groupby('a'), name)()

        deprecated_functions = [name for (name, type_) in missing_functions
                                if type_.__name__ == 'deprecated_function']
        for name in deprecated_functions:
            with self.assertRaisesRegex(PandasNotImplementedError,
                                        "method.*GroupBy.*{}.*is deprecated"
                                        .format(name)):
                getattr(kdf.groupby('a'), name)()

        # SeriesGroupBy functions
        missing_functions = inspect.getmembers(_MissingPandasLikeSeriesGroupBy,
                                               inspect.isfunction)
        unsupported_functions = [name for (name, type_) in missing_functions
                                 if type_.__name__ == 'unsupported_function']
        for name in unsupported_functions:
            with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    "method.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
                getattr(kdf.a.groupby(kdf.a), name)()

        deprecated_functions = [name for (name, type_) in missing_functions
                                if type_.__name__ == 'deprecated_function']
        for name in deprecated_functions:
            with self.assertRaisesRegex(PandasNotImplementedError,
                                        "method.*GroupBy.*{}.*is deprecated"
                                        .format(name)):
                getattr(kdf.a.groupby(kdf.a), name)()

        # DataFrameGroupBy properties
        missing_properties = inspect.getmembers(_MissingPandasLikeDataFrameGroupBy,
                                                lambda o: isinstance(o, property))
        unsupported_properties = [name for (name, type_) in missing_properties
                                  if type_.fget.__name__ == 'unsupported_property']
        for name in unsupported_properties:
            with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    "property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
                getattr(kdf.groupby('a'), name)
        deprecated_properties = [name for (name, type_) in missing_properties
                                 if type_.fget.__name__ == 'deprecated_property']
        for name in deprecated_properties:
            with self.assertRaisesRegex(PandasNotImplementedError,
                                        "property.*GroupBy.*{}.*is deprecated"
                                        .format(name)):
                getattr(kdf.groupby('a'), name)

        # SeriesGroupBy properties
        missing_properties = inspect.getmembers(_MissingPandasLikeSeriesGroupBy,
                                                lambda o: isinstance(o, property))
        unsupported_properties = [name for (name, type_) in missing_properties
                                  if type_.fget.__name__ == 'unsupported_property']
        for name in unsupported_properties:
            with self.assertRaisesRegex(
                    PandasNotImplementedError,
                    "property.*GroupBy.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
                getattr(kdf.a.groupby(kdf.a), name)
        deprecated_properties = [name for (name, type_) in missing_properties
                                 if type_.fget.__name__ == 'deprecated_property']
        for name in deprecated_properties:
            with self.assertRaisesRegex(PandasNotImplementedError,
                                        "property.*GroupBy.*{}.*is deprecated"
                                        .format(name)):
                getattr(kdf.a.groupby(kdf.a), name)

    @staticmethod
    def test_is_multi_agg_with_relabel():
        assert _is_multi_agg_with_relabel(a='max') is False
        assert _is_multi_agg_with_relabel(a_min=('a', 'max'),
                                          a_max=('a', 'min')) is True
idx: 1
id: 14084
msg: so should be fixed here also
proj: databricks-koalas
lang: py
patch:
@@ -6478,8 +6478,13 @@ dr_get_mcontext_priv(dcontext_t *dcontext, dr_mcontext_t *dmc, priv_mcontext_t *
      * when most old clients have been converted, remove this (we'll
      * still return false) */
-    CLIENT_ASSERT(dmc->size == sizeof(dr_mcontext_t),
-                  "dr_mcontext_t.size field not set properly");
+    CLIENT_ASSERT(
+        dmc->size == sizeof(dr_mcontext_t) ||
+            /* Opmask storage has been added for AVX-512 (xref i#1312).
+             * An older client's mcontext may be filled in w/o the new structure.
+             */
+            dmc->size == sizeof(dr_mcontext_t) - sizeof(dr_opmask_t),
+        "dr_mcontext_t.size field not set properly");
     CLIENT_ASSERT(dmc->flags != 0 && (dmc->flags & ~(DR_MC_ALL)) == 0,
                   "dr_mcontext_t.flags field not set properly");
     } else
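Why the patch accepts a second size: AVX-512 support appended dr_opmask_t storage to dr_mcontext_t, so a client compiled against pre-AVX-512 headers reports a size smaller by exactly sizeof(dr_opmask_t). A minimal sketch of the client-side convention the assert validates, assuming the standard DynamoRIO client API from dr_api.h; the wrapper function and the dr_printf output are illustrative, not part of the patch:

#include "dr_api.h"

/* Sketch: how a client conventionally fills in dr_mcontext_t before
 * asking DR for the machine context. dr_get_mcontext_priv() asserts
 * on exactly these two fields. */
static void
print_stack_pointer(void *drcontext)
{
    dr_mcontext_t mc;
    mc.size = sizeof(mc);  /* a client built before the AVX-512 change passes
                            * sizeof(dr_mcontext_t) - sizeof(dr_opmask_t),
                            * which the patched assert now also accepts */
    mc.flags = DR_MC_ALL;  /* must be non-zero and a subset of DR_MC_ALL */
    if (dr_get_mcontext(drcontext, &mc))
        dr_printf("xsp=" PFX "\n", mc.xsp);
}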
y: 1
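The record's oldf field, the full pre-patch instrument.c, follows. For orientation: the file implements the client event machinery (the callback lists, the call_all macros, and the dr_register_*/dr_unregister_* pairs), and everything a client does starts from DR looking up dr_client_main and calling it. A minimal sketch of a client exercising that registration API as defined below; the handler bodies are illustrative:

#include "dr_api.h"

static void
event_exit(void)
{
    dr_printf("client exiting\n");
}

static dr_emit_flags_t
event_bb(void *drcontext, void *tag, instrlist_t *bb, bool for_trace,
         bool translating)
{
    /* No instrumentation added; just observe each basic block. */
    return DR_EMIT_DEFAULT;
}

/* DR looks up this routine (INSTRUMENT_INIT_NAME below) in the client
 * library and calls it with the parsed option array (i#1736). */
DR_EXPORT void
dr_client_main(client_id_t id, int argc, const char *argv[])
{
    dr_register_exit_event(event_exit);
    dr_register_bb_event(event_bb);
}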
/* ****************************************************************************** * Copyright (c) 2010-2019 Google, Inc. All rights reserved. * Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved. * Copyright (c) 2002-2010 VMware, Inc. All rights reserved. * ******************************************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2002-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2002 Hewlett-Packard Company */ /* * instrument.c - interface for instrumentation */ #include "../globals.h" /* just to disable warning C4206 about an empty file */ #include "instrument.h" #include "arch.h" #include "instr.h" #include "instr_create.h" #include "instrlist.h" #include "decode.h" #include "disassemble.h" #include "../fragment.h" #include "../fcache.h" #include "../emit.h" #include "../link.h" #include "../monitor.h" /* for mark_trace_head */ #include <stdarg.h> /* for varargs */ #include "../nudge.h" /* for nudge_internal() */ #include "../synch.h" #include "../annotations.h" #include "../translate.h" #ifdef UNIX # include <sys/time.h> /* ITIMER_* */ # include "../unix/module.h" /* redirect_* functions */ #endif #ifdef CLIENT_INTERFACE /* in utils.c, not exported to everyone */ extern ssize_t do_file_write(file_t f, const char *fmt, va_list ap); # ifdef DEBUG /* case 10450: give messages to clients */ /* we can't undef ASSERT b/c of DYNAMO_OPTION */ # undef ASSERT_TRUNCATE # undef ASSERT_BITFIELD_TRUNCATE # undef ASSERT_NOT_REACHED # define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD # define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD # define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD # endif /* PR 200065: User passes us the shared library, we look up "dr_init" * or "dr_client_main" and call it. From there, the client can register which events * it wishes to receive. 
*/ # define INSTRUMENT_INIT_NAME_LEGACY "dr_init" # define INSTRUMENT_INIT_NAME "dr_client_main" /* PR 250952: version check * If changing this, don't forget to update: * - lib/dr_defines.h _USES_DR_VERSION_ * - api/docs/footer.html */ # define USES_DR_VERSION_NAME "_USES_DR_VERSION_" /* Should we expose this for use in samples/tracedump.c? * Also, if we change this, need to change the symlink generation * in core/CMakeLists.txt: at that point should share single define. */ /* OLDEST_COMPATIBLE_VERSION now comes from configure.h */ /* The 3rd version number, the bugfix/patch number, should not affect * compatibility, so our version check number simply uses: * major*100 + minor * Which gives us room for 100 minor versions per major. */ # define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION /* Store the unique not-part-of-version build number (the version * BUILD_NUMBER is limited to 64K and is not guaranteed to be unique) * somewhere accessible at a customer site. We could alternatively * pull it out of our DYNAMORIO_DEFINES string. */ DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER); /* Acquire when registering or unregistering event callbacks * Also held when invoking events, which happens much more often * than registration changes, so we use rwlock */ DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock, INIT_READWRITE_LOCK(callback_registration_lock)); /* Structures for maintaining lists of event callbacks */ typedef void (*callback_t)(void); typedef struct _callback_list_t { callback_t *callbacks; /* array of callback functions */ size_t num; /* number of callbacks registered */ size_t size; /* allocated space (may be larger than num) */ } callback_list_t; /* This is a little convoluted. The following is a macro to iterate * over a list of callbacks and call each function. We use a macro * instead of a function so we can pass the function type and perform * a typecast. We need to copy the callback list before iterating to * support the possibility of one callback unregistering another and * messing up the list while we're iterating. We'll optimize the case * for 5 or fewer registered callbacks and stack-allocate the temp * list. Otherwise, we'll heap-allocate the temp. * * We allow the args to use the var "idx" to access the client index. * * We consider the first registered callback to have the highest * priority and call it last. If we gave the last registered callback * the highest priority, a client could re-register a routine to * increase its priority. That seems a little weird. */ /* */ # define FAST_COPY_SIZE 5 # define call_all_ret(ret, retop, postop, vec, type, ...) 
\ do { \ size_t idx, num; \ /* we will be called even if no callbacks (i.e., (vec).num == 0) */ \ /* we guarantee we're in DR state at all callbacks and clean calls */ \ /* XXX: add CLIENT_ASSERT here */ \ d_r_read_lock(&callback_registration_lock); \ num = (vec).num; \ if (num == 0) { \ d_r_read_unlock(&callback_registration_lock); \ } else if (num <= FAST_COPY_SIZE) { \ callback_t tmp[FAST_COPY_SIZE]; \ memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \ d_r_read_unlock(&callback_registration_lock); \ for (idx = 0; idx < num; idx++) { \ ret retop(((type)tmp[num - idx - 1])(__VA_ARGS__)) postop; \ } \ } else { \ callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, num, \ ACCT_OTHER, UNPROTECTED); \ memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \ d_r_read_unlock(&callback_registration_lock); \ for (idx = 0; idx < num; idx++) { \ ret retop(((type)tmp[num - idx - 1])(__VA_ARGS__)) postop; \ } \ HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, ACCT_OTHER, \ UNPROTECTED); \ } \ } while (0) /* It's less error-prone if we just have one call_all macro. We'll * reuse call_all_ret above for callbacks that don't have a return * value by assigning to a dummy var. Note that this means we'll * have to pass an int-returning type to call_all() */ # define call_all(vec, type, ...) \ do { \ int dummy; \ call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \ } while (0) /* Lists of callbacks for each event type. Note that init and nudge * callback lists are kept in the client_lib_t data structure below. * We could store all lists on a per-client basis, but we can iterate * over these lists slightly more efficiently if we store all * callbacks for a specific event in a single list. */ static callback_list_t exit_callbacks = { 0, }; static callback_list_t thread_init_callbacks = { 0, }; static callback_list_t thread_exit_callbacks = { 0, }; # ifdef UNIX static callback_list_t fork_init_callbacks = { 0, }; # endif static callback_list_t bb_callbacks = { 0, }; static callback_list_t trace_callbacks = { 0, }; # ifdef CUSTOM_TRACES static callback_list_t end_trace_callbacks = { 0, }; # endif static callback_list_t fragdel_callbacks = { 0, }; static callback_list_t restore_state_callbacks = { 0, }; static callback_list_t restore_state_ex_callbacks = { 0, }; static callback_list_t module_load_callbacks = { 0, }; static callback_list_t module_unload_callbacks = { 0, }; static callback_list_t filter_syscall_callbacks = { 0, }; static callback_list_t pre_syscall_callbacks = { 0, }; static callback_list_t post_syscall_callbacks = { 0, }; static callback_list_t kernel_xfer_callbacks = { 0, }; # ifdef WINDOWS static callback_list_t exception_callbacks = { 0, }; # else static callback_list_t signal_callbacks = { 0, }; # endif # ifdef PROGRAM_SHEPHERDING static callback_list_t security_violation_callbacks = { 0, }; # endif static callback_list_t persist_ro_size_callbacks = { 0, }; static callback_list_t persist_ro_callbacks = { 0, }; static callback_list_t resurrect_ro_callbacks = { 0, }; static callback_list_t persist_rx_size_callbacks = { 0, }; static callback_list_t persist_rx_callbacks = { 0, }; static callback_list_t resurrect_rx_callbacks = { 0, }; static callback_list_t persist_rw_size_callbacks = { 0, }; static callback_list_t persist_rw_callbacks = { 0, }; static callback_list_t resurrect_rw_callbacks = { 0, }; static callback_list_t persist_patch_callbacks = { 0, }; /* An array of client libraries. 
We use a static array instead of a * heap-allocated list so we can load the client libs before * initializing DR's heap. */ typedef struct _client_lib_t { client_id_t id; char path[MAXIMUM_PATH]; /* PR 366195: dlopen() handle truly is opaque: != start */ shlib_handle_t lib; app_pc start; app_pc end; /* The raw option string, which after i#1736 contains token-delimiting quotes */ char options[MAX_OPTION_LENGTH]; /* The option string with token-delimiting quotes removed for backward compat */ char legacy_options[MAX_OPTION_LENGTH]; /* The parsed options: */ int argc; const char **argv; /* We need to associate nudge events with a specific client so we * store that list here in the client_lib_t instead of using a * single global list. */ callback_list_t nudge_callbacks; } client_lib_t; /* these should only be modified prior to instrument_init(), since no * readers of the client_libs array (event handlers, etc.) use synch */ static client_lib_t client_libs[MAX_CLIENT_LIBS] = { { 0, } }; static size_t num_client_libs = 0; static void *persist_user_data[MAX_CLIENT_LIBS]; # ifdef WINDOWS /* private kernel32 lib, used to print to console */ static bool print_to_console; static shlib_handle_t priv_kernel32; typedef BOOL(WINAPI *kernel32_WriteFile_t)(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED); static kernel32_WriteFile_t kernel32_WriteFile; static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...); # endif bool client_requested_exit; # ifdef WINDOWS /* used for nudge support */ static bool block_client_nudge_threads = false; DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0); # endif # ifdef CLIENT_SIDELINE /* # of sideline threads */ DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0); # endif # if defined(WINDOWS) || defined(CLIENT_SIDELINE) /* protects block_client_nudge_threads and incrementing num_client_nudge_threads */ DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock, INIT_LOCK_FREE(client_thread_count_lock)); # endif static vm_area_vector_t *client_aux_libs; static bool track_where_am_i; # ifdef WINDOWS DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock, INIT_LOCK_FREE(client_aux_lib64_lock)); # endif /****************************************************************************/ /* INTERNAL ROUTINES */ static bool char_is_quote(char c) { return c == '"' || c == '\'' || c == '`'; } static void parse_option_array(client_id_t client_id, const char *opstr, int *argc OUT, const char ***argv OUT, size_t max_token_size) { const char **a; int cnt; const char *s; char *token = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, max_token_size, ACCT_CLIENT, UNPROTECTED); for (cnt = 0, s = dr_get_token(opstr, token, max_token_size); s != NULL; s = dr_get_token(s, token, max_token_size)) { cnt++; } cnt++; /* add 1 so 0 can be "app" */ a = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, const char *, cnt, ACCT_CLIENT, UNPROTECTED); cnt = 0; a[cnt] = dr_strdup(dr_get_client_path(client_id) HEAPACCT(ACCT_CLIENT)); cnt++; for (s = dr_get_token(opstr, token, max_token_size); s != NULL; s = dr_get_token(s, token, max_token_size)) { a[cnt] = dr_strdup(token HEAPACCT(ACCT_CLIENT)); cnt++; } HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, token, char, max_token_size, ACCT_CLIENT, UNPROTECTED); *argc = cnt; *argv = a; } static bool free_option_array(int argc, const char **argv) { int i; for (i = 0; i < argc; i++) { dr_strfree(argv[i] HEAPACCT(ACCT_CLIENT)); } HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, argv, char *, argc, ACCT_CLIENT, UNPROTECTED); return true; } static void 
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect) { if (func == NULL) { CLIENT_ASSERT(false, "trying to register a NULL callback"); return; } if (standalone_library) { CLIENT_ASSERT(false, "events not supported in standalone library mode"); return; } d_r_write_lock(&callback_registration_lock); /* Although we're receiving a pointer to a callback_list_t, we're * usually modifying a static var. */ if (unprotect) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); } /* We may already have an open slot since we allocate in twos and * because we don't bother to free the storage when we remove the * callback. Check and only allocate if necessary. */ if (vec->num == vec->size) { callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, vec->size + 2, /* Let's allocate 2 */ ACCT_OTHER, UNPROTECTED); if (tmp == NULL) { CLIENT_ASSERT(false, "out of memory: can't register callback"); d_r_write_unlock(&callback_registration_lock); return; } if (vec->callbacks != NULL) { memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t)); HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size, ACCT_OTHER, UNPROTECTED); } vec->callbacks = tmp; vec->size += 2; } vec->callbacks[vec->num] = func; vec->num++; if (unprotect) { SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } d_r_write_unlock(&callback_registration_lock); } static bool remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect) { size_t i; bool found = false; if (func == NULL) { CLIENT_ASSERT(false, "trying to unregister a NULL callback"); return false; } d_r_write_lock(&callback_registration_lock); /* Although we're receiving a pointer to a callback_list_t, we're * usually modifying a static var. */ if (unprotect) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); } for (i = 0; i < vec->num; i++) { if (vec->callbacks[i] == func) { size_t j; /* shift down the entries on the tail */ for (j = i; j < vec->num - 1; j++) { vec->callbacks[j] = vec->callbacks[j + 1]; } vec->num -= 1; found = true; break; } } if (unprotect) { SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } d_r_write_unlock(&callback_registration_lock); return found; } /* This should only be called prior to instrument_init(), * since no readers of the client_libs array use synch * and since this routine assumes .data is writable. */ static void add_client_lib(const char *path, const char *id_str, const char *options) { client_id_t id; shlib_handle_t client_lib; DEBUG_DECLARE(size_t i); ASSERT(!dynamo_initialized); /* if ID not specified, we'll default to 0 */ id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16); # ifdef DEBUG /* Check for conflicting IDs */ for (i = 0; i < num_client_libs; i++) { CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID"); } # endif if (num_client_libs == MAX_CLIENT_LIBS) { CLIENT_ASSERT(false, "Max number of clients reached"); return; } LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path); client_lib = load_shared_library(path, IF_X64_ELSE(DYNAMO_OPTION(reachable_client), true)); if (client_lib == NULL) { char msg[MAXIMUM_PATH * 4]; char err[MAXIMUM_PATH * 2]; shared_library_error(err, BUFFER_SIZE_ELEMENTS(err)); snprintf(msg, BUFFER_SIZE_ELEMENTS(msg), ".\n\tError opening instrumentation library %s:\n\t%s", path, err); NULL_TERMINATE_BUFFER(msg); /* PR 232490 - malformed library names or incorrect * permissions shouldn't blow up an app in release builds as * they may happen at customer sites with a third party * client. 
*/ /* PR 408318: 32-vs-64 errors should NOT be fatal to continue * in debug build across execve chains. Xref i#147. * XXX: w/ -private_loader, err always equals "error in private loader" * and so we never match here! */ IF_UNIX(if (strstr(err, "wrong ELF class") == NULL)) CLIENT_ASSERT(false, msg); SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4, get_application_name(), get_application_pid(), path, msg); } else { /* PR 250952: version check */ int *uses_dr_version = (int *)lookup_library_routine(client_lib, USES_DR_VERSION_NAME); if (uses_dr_version == NULL || *uses_dr_version < OLDEST_COMPATIBLE_VERSION || *uses_dr_version > NEWEST_COMPATIBLE_VERSION) { /* not a fatal usage error since we want release build to continue */ CLIENT_ASSERT(false, "client library is incompatible with this version of DR"); SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2, get_application_name(), get_application_pid()); } else { size_t idx = num_client_libs++; client_libs[idx].id = id; client_libs[idx].lib = client_lib; app_pc client_start, client_end; # if defined(STATIC_LIBRARY) && defined(LINUX) // For DR under static+linux we know that the client and DR core // code are built into the app itself. To avoid various edge cases // in finding the "library" bounds, delegate this boundary discovery // to the dll bounds functions. xref i#3387. client_start = get_dynamorio_dll_start(); client_end = get_dynamorio_dll_end(); ASSERT(client_start <= (app_pc)uses_dr_version && (app_pc)uses_dr_version < client_end); # else DEBUG_DECLARE(bool ok =) shared_library_bounds(client_lib, (byte *)uses_dr_version, NULL, &client_start, &client_end); ASSERT(ok); # endif client_libs[idx].start = client_start; client_libs[idx].end = client_end; LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at " PFX "-" PFX "\n", path, client_libs[idx].start, client_libs[idx].end); # ifdef X64 /* Now that we map the client within the constraints, this request * should always succeed. */ if (DYNAMO_OPTION(reachable_client)) { request_region_be_heap_reachable(client_libs[idx].start, client_libs[idx].end - client_libs[idx].start); } # endif strncpy(client_libs[idx].path, path, BUFFER_SIZE_ELEMENTS(client_libs[idx].path)); NULL_TERMINATE_BUFFER(client_libs[idx].path); if (options != NULL) { strncpy(client_libs[idx].options, options, BUFFER_SIZE_ELEMENTS(client_libs[idx].options)); NULL_TERMINATE_BUFFER(client_libs[idx].options); } /* We'll look up dr_client_main and call it in instrument_init */ } } } void instrument_load_client_libs(void) { if (CLIENTS_EXIST()) { char buf[MAX_LIST_OPTION_LENGTH]; char *path; string_option_read_lock(); strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf)); string_option_read_unlock(); NULL_TERMINATE_BUFFER(buf); /* We're expecting path;ID;options triples */ path = buf; do { char *id = NULL; char *options = NULL; char *next_path = NULL; id = strstr(path, ";"); if (id != NULL) { id[0] = '\0'; id++; options = strstr(id, ";"); if (options != NULL) { options[0] = '\0'; options++; next_path = strstr(options, ";"); if (next_path != NULL) { next_path[0] = '\0'; next_path++; } } } # ifdef STATIC_LIBRARY /* We ignore client library paths and allow client code anywhere in the app. * We have a check in load_shared_library() to avoid loading * a 2nd copy of the app. * We do support passing client ID and options via the first -client_lib. */ add_client_lib(get_application_name(), id == NULL ? "0" : id, options == NULL ? 
"" : options); break; # endif add_client_lib(path, id, options); path = next_path; } while (path != NULL); } } static void init_client_aux_libs(void) { if (client_aux_libs == NULL) { VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT, VECTOR_SHARED, client_aux_libs); } } void instrument_init(void) { size_t i; init_client_aux_libs(); if (num_client_libs > 0) { /* We no longer distinguish in-DR vs in-client crashes, as many crashes in * the DR lib are really client bugs. * We expect most end-user tools to call dr_set_client_name() so we * have generic defaults here: */ set_exception_strings("Tool", "your tool's issue tracker"); } /* Iterate over the client libs and call each init routine */ for (i = 0; i < num_client_libs; i++) { void (*init)(client_id_t, int, const char **) = (void (*)(client_id_t, int, const char **))( lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME)); void (*legacy)(client_id_t) = (void (*)(client_id_t))( lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME_LEGACY)); /* we can't do this in instrument_load_client_libs() b/c vmheap * is not set up at that point */ all_memory_areas_lock(); update_all_memory_areas(client_libs[i].start, client_libs[i].end, /* FIXME: need to walk the sections: but may be * better to obfuscate from clients anyway. * We can't set as MEMPROT_NONE as that leads to * bugs if the app wants to interpret part of * its code section (xref PR 504629). */ MEMPROT_READ, DR_MEMTYPE_IMAGE); all_memory_areas_unlock(); /* i#1736: parse the options up front */ parse_option_array(client_libs[i].id, client_libs[i].options, &client_libs[i].argc, &client_libs[i].argv, MAX_OPTION_LENGTH); # ifdef STATIC_LIBRARY /* We support the app having client code anywhere, so there does not * have to be an init routine that we call. This means the app * may have to iterate modules on its own. */ # else /* Since the user has to register all other events, it * doesn't make sense to provide the -client_lib * option for a module that doesn't export an init routine. */ CLIENT_ASSERT(init != NULL || legacy != NULL, "client does not export a dr_client_main or dr_init routine"); # endif if (init != NULL) (*init)(client_libs[i].id, client_libs[i].argc, client_libs[i].argv); else if (legacy != NULL) (*legacy)(client_libs[i].id); } /* We now initialize the 1st thread before coming here, so we can * hand the client a dcontext; so we need to specially generate * the thread init event now. An alternative is to have * dr_get_global_drcontext(), but that's extra complexity for no * real reason. * We raise the thread init event prior to the module load events * so the client can access a dcontext in module load events (i#1339). */ if (thread_init_callbacks.num > 0) { instrument_thread_init(get_thread_private_dcontext(), false, false); } /* If the client just registered the module-load event, let's * assume it wants to be informed of *all* modules and tell it * which modules are already loaded. If the client registers the * event later, it will need to use the module iterator routines * to retrieve currently loaded modules. We use the dr_module_iterator * exposed to the client to avoid locking issues. 
*/ if (module_load_callbacks.num > 0) { dr_module_iterator_t *mi = dr_module_iterator_start(); while (dr_module_iterator_hasnext(mi)) { module_data_t *data = dr_module_iterator_next(mi); instrument_module_load(data, true /*already loaded*/); /* XXX; more efficient to set this flag during dr_module_iterator_start */ os_module_set_flag(data->start, MODULE_LOAD_EVENT); dr_free_module_data(data); } dr_module_iterator_stop(mi); } } static void free_callback_list(callback_list_t *vec) { if (vec->callbacks != NULL) { HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size, ACCT_OTHER, UNPROTECTED); vec->callbacks = NULL; } vec->size = 0; vec->num = 0; } static void free_all_callback_lists() { free_callback_list(&exit_callbacks); free_callback_list(&thread_init_callbacks); free_callback_list(&thread_exit_callbacks); # ifdef UNIX free_callback_list(&fork_init_callbacks); # endif free_callback_list(&bb_callbacks); free_callback_list(&trace_callbacks); # ifdef CUSTOM_TRACES free_callback_list(&end_trace_callbacks); # endif free_callback_list(&fragdel_callbacks); free_callback_list(&restore_state_callbacks); free_callback_list(&restore_state_ex_callbacks); free_callback_list(&module_load_callbacks); free_callback_list(&module_unload_callbacks); free_callback_list(&filter_syscall_callbacks); free_callback_list(&pre_syscall_callbacks); free_callback_list(&post_syscall_callbacks); free_callback_list(&kernel_xfer_callbacks); # ifdef WINDOWS free_callback_list(&exception_callbacks); # else free_callback_list(&signal_callbacks); # endif # ifdef PROGRAM_SHEPHERDING free_callback_list(&security_violation_callbacks); # endif free_callback_list(&persist_ro_size_callbacks); free_callback_list(&persist_ro_callbacks); free_callback_list(&resurrect_ro_callbacks); free_callback_list(&persist_rx_size_callbacks); free_callback_list(&persist_rx_callbacks); free_callback_list(&resurrect_rx_callbacks); free_callback_list(&persist_rw_size_callbacks); free_callback_list(&persist_rw_callbacks); free_callback_list(&resurrect_rw_callbacks); free_callback_list(&persist_patch_callbacks); } void instrument_exit_post_sideline(void) { # if defined(WINDOWS) || defined(CLIENT_SIDELINE) DELETE_LOCK(client_thread_count_lock); # endif } void instrument_exit(void) { /* Note - currently own initexit lock when this is called (see PR 227619). */ /* support dr_get_mcontext() from the exit event */ if (!standalone_library) get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true; call_all(exit_callbacks, int (*)(), /* It seems the compiler is confused if we pass no var args * to the call_all macro. Bogus NULL arg */ NULL); if (IF_DEBUG_ELSE(true, doing_detach)) { /* Unload all client libs and free any allocated storage */ size_t i; for (i = 0; i < num_client_libs; i++) { free_callback_list(&client_libs[i].nudge_callbacks); unload_shared_library(client_libs[i].lib); if (client_libs[i].argv != NULL) free_option_array(client_libs[i].argc, client_libs[i].argv); } free_all_callback_lists(); } vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs); client_aux_libs = NULL; num_client_libs = 0; # ifdef WINDOWS DELETE_LOCK(client_aux_lib64_lock); # endif DELETE_READWRITE_LOCK(callback_registration_lock); } bool is_in_client_lib(app_pc addr) { /* NOTE: we use this routine for detecting exceptions in * clients. If we add a callback on that event we'll have to be * sure to deliver it only to the right client. 
*/ size_t i; for (i = 0; i < num_client_libs; i++) { if ((addr >= (app_pc)client_libs[i].start) && (addr < client_libs[i].end)) { return true; } } if (client_aux_libs != NULL && vmvector_overlap(client_aux_libs, addr, addr + 1)) return true; return false; } bool get_client_bounds(client_id_t client_id, app_pc *start /*OUT*/, app_pc *end /*OUT*/) { if (client_id >= num_client_libs) return false; if (start != NULL) *start = (app_pc)client_libs[client_id].start; if (end != NULL) *end = (app_pc)client_libs[client_id].end; return true; } const char * get_client_path_from_addr(app_pc addr) { size_t i; for (i = 0; i < num_client_libs; i++) { if ((addr >= (app_pc)client_libs[i].start) && (addr < client_libs[i].end)) { return client_libs[i].path; } } return ""; } bool is_valid_client_id(client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { return true; } } return false; } void dr_register_exit_event(void (*func)(void)) { add_callback(&exit_callbacks, (void (*)(void))func, true); } bool dr_unregister_exit_event(void (*func)(void)) { return remove_callback(&exit_callbacks, (void (*)(void))func, true); } void dr_register_bb_event(dr_emit_flags_t (*func)(void *drcontext, void *tag, instrlist_t *bb, bool for_trace, bool translating)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for bb event when code_api is disabled"); return; } add_callback(&bb_callbacks, (void (*)(void))func, true); } bool dr_unregister_bb_event(dr_emit_flags_t (*func)(void *drcontext, void *tag, instrlist_t *bb, bool for_trace, bool translating)) { return remove_callback(&bb_callbacks, (void (*)(void))func, true); } void dr_register_trace_event(dr_emit_flags_t (*func)(void *drcontext, void *tag, instrlist_t *trace, bool translating)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for trace event when code_api is disabled"); return; } add_callback(&trace_callbacks, (void (*)(void))func, true); } bool dr_unregister_trace_event(dr_emit_flags_t (*func)(void *drcontext, void *tag, instrlist_t *trace, bool translating)) { return remove_callback(&trace_callbacks, (void (*)(void))func, true); } # ifdef CUSTOM_TRACES void dr_register_end_trace_event(dr_custom_trace_action_t (*func)(void *drcontext, void *tag, void *next_tag)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled"); return; } add_callback(&end_trace_callbacks, (void (*)(void))func, true); } bool dr_unregister_end_trace_event(dr_custom_trace_action_t (*func)(void *drcontext, void *tag, void *next_tag)) { return remove_callback(&end_trace_callbacks, (void (*)(void))func, true); } # endif void dr_register_delete_event(void (*func)(void *drcontext, void *tag)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for delete event when code_api is disabled"); return; } add_callback(&fragdel_callbacks, (void (*)(void))func, true); } bool dr_unregister_delete_event(void (*func)(void *drcontext, void *tag)) { return remove_callback(&fragdel_callbacks, (void (*)(void))func, true); } void dr_register_restore_state_event(void (*func)(void *drcontext, void *tag, dr_mcontext_t *mcontext, bool restore_memory, bool app_code_consistent)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled"); return; } add_callback(&restore_state_callbacks, (void (*)(void))func, true); } bool dr_unregister_restore_state_event(void (*func)(void *drcontext, void *tag, dr_mcontext_t *mcontext, bool 
restore_memory, bool app_code_consistent)) { return remove_callback(&restore_state_callbacks, (void (*)(void))func, true); } void dr_register_restore_state_ex_event(bool (*func)(void *drcontext, bool restore_memory, dr_restore_state_info_t *info)) { if (!INTERNAL_OPTION(code_api)) { CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled"); return; } add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true); } bool dr_unregister_restore_state_ex_event(bool (*func)(void *drcontext, bool restore_memory, dr_restore_state_info_t *info)) { return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true); } void dr_register_thread_init_event(void (*func)(void *drcontext)) { add_callback(&thread_init_callbacks, (void (*)(void))func, true); } bool dr_unregister_thread_init_event(void (*func)(void *drcontext)) { return remove_callback(&thread_init_callbacks, (void (*)(void))func, true); } void dr_register_thread_exit_event(void (*func)(void *drcontext)) { add_callback(&thread_exit_callbacks, (void (*)(void))func, true); } bool dr_unregister_thread_exit_event(void (*func)(void *drcontext)) { return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true); } # ifdef UNIX void dr_register_fork_init_event(void (*func)(void *drcontext)) { add_callback(&fork_init_callbacks, (void (*)(void))func, true); } bool dr_unregister_fork_init_event(void (*func)(void *drcontext)) { return remove_callback(&fork_init_callbacks, (void (*)(void))func, true); } # endif void dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info, bool loaded)) { add_callback(&module_load_callbacks, (void (*)(void))func, true); } bool dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info, bool loaded)) { return remove_callback(&module_load_callbacks, (void (*)(void))func, true); } void dr_register_module_unload_event(void (*func)(void *drcontext, const module_data_t *info)) { add_callback(&module_unload_callbacks, (void (*)(void))func, true); } bool dr_unregister_module_unload_event(void (*func)(void *drcontext, const module_data_t *info)) { return remove_callback(&module_unload_callbacks, (void (*)(void))func, true); } # ifdef WINDOWS void dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt)) { add_callback(&exception_callbacks, (bool (*)(void))func, true); } bool dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt)) { return remove_callback(&exception_callbacks, (bool (*)(void))func, true); } # else void dr_register_signal_event(dr_signal_action_t (*func)(void *drcontext, dr_siginfo_t *siginfo)) { add_callback(&signal_callbacks, (void (*)(void))func, true); } bool dr_unregister_signal_event(dr_signal_action_t (*func)(void *drcontext, dr_siginfo_t *siginfo)) { return remove_callback(&signal_callbacks, (void (*)(void))func, true); } # endif /* WINDOWS */ void dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum)) { add_callback(&filter_syscall_callbacks, (void (*)(void))func, true); } bool dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum)) { return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true); } void dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum)) { add_callback(&pre_syscall_callbacks, (void (*)(void))func, true); } bool dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum)) { return remove_callback(&pre_syscall_callbacks, (void 
(*)(void))func, true); } void dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum)) { add_callback(&post_syscall_callbacks, (void (*)(void))func, true); } bool dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum)) { return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true); } void dr_register_kernel_xfer_event(void (*func)(void *drcontext, const dr_kernel_xfer_info_t *info)) { add_callback(&kernel_xfer_callbacks, (void (*)(void))func, true); } bool dr_unregister_kernel_xfer_event(void (*func)(void *drcontext, const dr_kernel_xfer_info_t *info)) { return remove_callback(&kernel_xfer_callbacks, (void (*)(void))func, true); } # ifdef PROGRAM_SHEPHERDING void dr_register_security_event(void (*func)(void *drcontext, void *source_tag, app_pc source_pc, app_pc target_pc, dr_security_violation_type_t violation, dr_mcontext_t *mcontext, dr_security_violation_action_t *action)) { add_callback(&security_violation_callbacks, (void (*)(void))func, true); } bool dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag, app_pc source_pc, app_pc target_pc, dr_security_violation_type_t violation, dr_mcontext_t *mcontext, dr_security_violation_action_t *action)) { return remove_callback(&security_violation_callbacks, (void (*)(void))func, true); } # endif void dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { add_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func, /* the nudge callback list is stored on the heap, so * we don't need to unprotect the .data section when * we update the list */ false); return; } } CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID"); } bool dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { return remove_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func, /* the nudge callback list is stored on the heap, so * we don't need to unprotect the .data section when * we update the list */ false); } } CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID"); return false; } dr_config_status_t dr_nudge_client_ex(process_id_t process_id, client_id_t client_id, uint64 argument, uint timeout_ms) { if (process_id == get_process_id()) { size_t i; # ifdef WINDOWS pre_second_thread(); # endif for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == client_id) { if (client_libs[i].nudge_callbacks.num == 0) { CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered"); return false; } return nudge_internal(process_id, NUDGE_GENERIC(client), argument, client_id, timeout_ms); } } return false; } else { return nudge_internal(process_id, NUDGE_GENERIC(client), argument, client_id, timeout_ms); } } bool dr_nudge_client(client_id_t client_id, uint64 argument) { return dr_nudge_client_ex(get_process_id(), client_id, argument, 0) == DR_SUCCESS; } # ifdef WINDOWS DR_API bool dr_is_nudge_thread(void *drcontext) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread"); return dcontext->nudge_target != NULL; } # endif void instrument_client_thread_init(dcontext_t *dcontext, bool client_thread) { if (dcontext->client_data == NULL) { dcontext->client_data = HEAP_TYPE_ALLOC(dcontext, client_data_t, ACCT_OTHER, UNPROTECTED); memset(dcontext->client_data, 0x0, 
sizeof(client_data_t)); # ifdef CLIENT_SIDELINE ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex); # endif CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 || client_thread, "1st call to instrument_thread_init should have no cbs"); } # ifdef CLIENT_SIDELINE if (client_thread) { ATOMIC_INC(int, num_client_sideline_threads); /* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */ dcontext->thread_record->under_dynamo_control = false; dcontext->client_data->is_client_thread = true; dcontext->client_data->suspendable = true; } # endif /* CLIENT_SIDELINE */ } void instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc) { /* Note that we're called twice for the initial thread: once prior * to instrument_init() (PR 216936) to set up the dcontext client * field (at which point there should be no callbacks since client * has not had a chance to register any) (now split out, but both * routines are called prior to instrument_init()), and once after * instrument_init() to call the client event. */ # if defined(CLIENT_INTERFACE) && defined(WINDOWS) bool swap_peb = false; # endif if (client_thread) { /* no init event */ return; } # if defined(CLIENT_INTERFACE) && defined(WINDOWS) /* i#996: we might be in app's state. * It is simpler to check and swap here than earlier on thread init paths. */ if (dr_using_app_state(dcontext)) { swap_peb_pointer(dcontext, true /*to priv*/); swap_peb = true; } # endif /* i#117/PR 395156: support dr_get_mcontext() from the thread init event */ if (valid_mc) dcontext->client_data->mcontext_in_dcontext = true; call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext); if (valid_mc) dcontext->client_data->mcontext_in_dcontext = false; # if defined(CLIENT_INTERFACE) && defined(WINDOWS) if (swap_peb) swap_peb_pointer(dcontext, false /*to app*/); # endif } # ifdef UNIX void instrument_fork_init(dcontext_t *dcontext) { call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext); } # endif /* PR 536058: split the exit event from thread cleanup, to provide a * dcontext in the process exit event */ void instrument_thread_exit_event(dcontext_t *dcontext) { # ifdef CLIENT_SIDELINE if (IS_CLIENT_THREAD(dcontext) /* if nudge thread calls dr_exit_process() it will be marked as a client * thread: rule it out here so we properly clean it up */ IF_WINDOWS(&&dcontext->nudge_target == NULL)) { ATOMIC_DEC(int, num_client_sideline_threads); /* no exit event */ return; } # endif /* i#1394: best-effort to try to avoid crashing thread exit events * where thread init was never called. */ if (!dynamo_initialized) return; /* support dr_get_mcontext() from the exit event */ dcontext->client_data->mcontext_in_dcontext = true; /* Note - currently own initexit lock when this is called (see PR 227619). 
*/ call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext); } void instrument_thread_exit(dcontext_t *dcontext) { # ifdef DEBUG client_todo_list_t *todo; client_flush_req_t *flush; # endif # ifdef DEBUG /* PR 470957: avoid racy crashes by not freeing in release build */ # ifdef CLIENT_SIDELINE DELETE_LOCK(dcontext->client_data->sideline_mutex); # endif /* could be heap space allocated for the todo list */ todo = dcontext->client_data->to_do; while (todo != NULL) { client_todo_list_t *next_todo = todo->next; if (todo->ilist != NULL) { instrlist_clear_and_destroy(dcontext, todo->ilist); } HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, UNPROTECTED); todo = next_todo; } /* could be heap space allocated for the flush list */ flush = dcontext->client_data->flush_list; while (flush != NULL) { client_flush_req_t *next_flush = flush->next; HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, UNPROTECTED); flush = next_flush; } HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t, ACCT_OTHER, UNPROTECTED); dcontext->client_data = NULL; /* for mutex_wait_contended_lock() */ dcontext->is_client_thread_exiting = true; /* for is_using_app_peb() */ # endif /* DEBUG */ } bool dr_bb_hook_exists(void) { return (bb_callbacks.num > 0); } bool dr_trace_hook_exists(void) { return (trace_callbacks.num > 0); } bool dr_fragment_deleted_hook_exists(void) { return (fragdel_callbacks.num > 0); } bool dr_end_trace_hook_exists(void) { return (end_trace_callbacks.num > 0); } bool dr_thread_exit_hook_exists(void) { return (thread_exit_callbacks.num > 0); } bool dr_exit_hook_exists(void) { return (exit_callbacks.num > 0); } bool dr_xl8_hook_exists(void) { return (restore_state_callbacks.num > 0 || restore_state_ex_callbacks.num > 0); } #endif /* CLIENT_INTERFACE */ /* needed outside of CLIENT_INTERFACE for simpler USE_BB_BUILDING_LOCK_STEADY_STATE() */ bool dr_modload_hook_exists(void) { /* We do not support (as documented in the module event doxygen) * the client changing this during bb building, as that will mess * up USE_BB_BUILDING_LOCK_STEADY_STATE(). */ return IF_CLIENT_INTERFACE_ELSE(module_load_callbacks.num > 0, false); } #ifdef CLIENT_INTERFACE bool hide_tag_from_client(app_pc tag) { # ifdef WINDOWS /* Case 10009: Basic blocks that consist of a single jump into the * interception buffer should be obscured from clients. Clients * will see the displaced code, so we'll provide the address of this * block if the client asks for the address of the displaced code. * * Note that we assume the jump is the first instruction in the * BB for any blocks that jump to the interception buffer. */ if (is_intercepted_app_pc(tag, NULL) || /* Displaced app code is now in the landing pad, so skip the * jump from the interception buffer to the landing pad */ is_in_interception_buffer(tag) || /* Landing pads that exist between hook points and the trampolines * shouldn't be seen by the client too. PR 250294. */ is_on_interception_initial_route(tag) || /* PR 219351: if we lose control on a callback and get it back on * one of our syscall trampolines, we'll appear at the jmp out of * the interception buffer to the int/sysenter instruction. The * problem is that our syscall trampolines, unlike our other * intercepted code, are hooked earlier than the real action point * and we have displaced app code at the start of the interception * buffer: we hook at the wrapper entrance and return w/ a jmp to * the sysenter/int instr. 
When creating bbs at the start we hack * it to make it look like there is no hook. But on retaking control * we end up w/ this jmp out that won't be solved w/ our normal * mechanism for other hook jmp-outs: so we just suppress and the * client next sees the post-syscall bb. It already saw a gap. */ is_syscall_trampoline(tag, NULL)) return true; # endif return false; } # ifdef DEBUG /* PR 214962: client must set translation fields */ static void check_ilist_translations(instrlist_t *ilist) { /* Ensure client set the translation field for all non-meta * instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS * (since we may decide ourselves to store) */ instr_t *in; for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) { if (!instr_opcode_valid(in)) { CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found"); } else if (instr_is_app(in)) { DOLOG(LOG_INTERP, 1, { if (instr_get_translation(in) == NULL) { d_r_loginst(get_thread_private_dcontext(), 1, in, "translation is NULL"); } }); CLIENT_ASSERT(instr_get_translation(in) != NULL, "translation field must be set for every app instruction"); } else { /* The meta instr could indeed not affect app state, but * better I think to assert and make them put in an * empty restore event callback in that case. */ DOLOG(LOG_INTERP, 1, { if (instr_get_translation(in) != NULL && !instr_is_our_mangling(in) && !dr_xl8_hook_exists()) { d_r_loginst(get_thread_private_dcontext(), 1, in, "translation != NULL"); } }); CLIENT_ASSERT(instr_get_translation(in) == NULL || instr_is_our_mangling(in) || dr_xl8_hook_exists(), /* FIXME: if multiple clients, we need to check that this * particular client has the callback: but we have * no way to do that other than looking at library * bounds...punting for now */ "a meta instr should not have its translation field " "set without also having a restore_state callback"); } } } # endif /* Returns true if the bb hook is called */ bool instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb, bool for_trace, bool translating, dr_emit_flags_t *emitflags) { dr_emit_flags_t ret = DR_EMIT_DEFAULT; /* return false if no BB hooks are registered */ if (bb_callbacks.num == 0) return false; if (hide_tag_from_client(tag)) { LOG(THREAD, LOG_INTERP, 3, "hiding tag " PFX " from client\n", tag); return false; } /* do not expand or up-decode the instrlist, client gets to choose * whether and how to do that */ # ifdef DEBUG LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n"); LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n"); if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0) instrlist_disassemble(dcontext, tag, bb, THREAD); # endif /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */ if (!translating && !for_trace) dcontext->client_data->mcontext_in_dcontext = true; /* Note - currently we are couldbelinking and hold the * bb_building lock when this is called (see PR 227619). 
*/ /* We or together the return values */ call_all_ret(ret, |=, , bb_callbacks, int (*)(void *, void *, instrlist_t *, bool, bool), (void *)dcontext, (void *)tag, bb, for_trace, translating); if (emitflags != NULL) *emitflags = ret; DOCHECK(1, { check_ilist_translations(bb); }); dcontext->client_data->mcontext_in_dcontext = false; if (IF_DEBUG_ELSE(for_trace, false)) { CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL && instrlist_get_fall_through_target(bb) == NULL, "instrlist_set_return/fall_through_target" " cannot be used on traces"); } # ifdef DEBUG LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n"); if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0) instrlist_disassemble(dcontext, tag, bb, THREAD); # endif return true; } /* Give the user the completely mangled and optimized trace just prior * to emitting into code cache, user gets final crack at it */ dr_emit_flags_t instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace, bool translating) { dr_emit_flags_t ret = DR_EMIT_DEFAULT; # ifdef UNSUPPORTED_API instr_t *instr; # endif if (trace_callbacks.num == 0) return DR_EMIT_DEFAULT; /* do not expand or up-decode the instrlist, client gets to choose * whether and how to do that */ # ifdef DEBUG LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n"); LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n"); if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0) instrlist_disassemble(dcontext, tag, trace, THREAD); # endif /* We always pass Level 3 instrs to the client, since we no longer * expose the expansion routines. */ # ifdef UNSUPPORTED_API for (instr = instrlist_first_expanded(dcontext, trace); instr != NULL; instr = instr_get_next_expanded(dcontext, trace, instr)) { instr_decode(dcontext, instr); } /* ASSUMPTION: all ctis are already at Level 3, so we don't have * to do a separate pass to fix up intra-list targets like * instrlist_decode_cti() does */ # endif /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */ if (!translating) dcontext->client_data->mcontext_in_dcontext = true; /* We or together the return values */ call_all_ret(ret, |=, , trace_callbacks, int (*)(void *, void *, instrlist_t *, bool), (void *)dcontext, (void *)tag, trace, translating); DOCHECK(1, { check_ilist_translations(trace); }); CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL && instrlist_get_fall_through_target(trace) == NULL, "instrlist_set_return/fall_through_target" " cannot be used on traces"); dcontext->client_data->mcontext_in_dcontext = false; # ifdef DEBUG LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n"); if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0) instrlist_disassemble(dcontext, tag, trace, THREAD); # endif return ret; } /* Notify user when a fragment is deleted from the cache * FIXME PR 242544: how does user know whether this is a shadowed copy or the * real thing? The user might free memory that shouldn't be freed! */ void instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags) { if (fragdel_callbacks.num == 0) return; # ifdef WINDOWS /* Case 10009: We don't call the basic block hook for blocks that * are jumps to the interception buffer, so we'll hide them here * as well. */ if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag)) return; # endif /* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL. * Our comments warn the user about this. 
*/ if (dcontext == GLOBAL_DCONTEXT) dcontext = NULL; call_all(fragdel_callbacks, int (*)(void *, void *), (void *)dcontext, (void *)tag); } bool instrument_restore_state(dcontext_t *dcontext, bool restore_memory, dr_restore_state_info_t *info) { bool res = true; /* Support both legacy and extended handlers */ if (restore_state_callbacks.num > 0) { call_all(restore_state_callbacks, int (*)(void *, void *, dr_mcontext_t *, bool, bool), (void *)dcontext, info->fragment_info.tag, info->mcontext, restore_memory, info->fragment_info.app_code_consistent); } if (restore_state_ex_callbacks.num > 0) { /* i#220/PR 480565: client has option of failing the translation. * We fail it if any client wants to, short-circuiting in that case. * This does violate the "priority order" of events where the * last one is supposed to have final say b/c it won't even * see the event (xref i#424). */ call_all_ret(res, = res &&, , restore_state_ex_callbacks, int (*)(void *, bool, dr_restore_state_info_t *), (void *)dcontext, restore_memory, info); } CLIENT_ASSERT(!restore_memory || res, "translation should not fail for restore_memory=true"); return res; } # ifdef CUSTOM_TRACES /* Ask whether to end trace prior to adding next_tag fragment. * Return values: * CUSTOM_TRACE_DR_DECIDES = use standard termination criteria * CUSTOM_TRACE_END_NOW = end trace * CUSTOM_TRACE_CONTINUE = do not end trace */ dr_custom_trace_action_t instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag) { dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES; if (end_trace_callbacks.num == 0) return ret; /* Highest priority callback decides how to end the trace (see * call_all_ret implementation) */ call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *), (void *)dcontext, (void *)trace_tag, (void *)next_tag); return ret; } # endif static module_data_t * create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point, uint flags, const module_names_t *names, const char *full_path # ifdef WINDOWS , version_number_t file_version, version_number_t product_version, uint checksum, uint timestamp, size_t mod_size # else , bool contiguous, uint num_segments, module_segment_t *os_segments, module_segment_data_t *segments, uint timestamp # ifdef MACOS , uint current_version, uint compatibility_version, const byte uuid[16] # endif # endif ) { # ifndef WINDOWS uint i; # endif module_data_t *copy = (module_data_t *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t, ACCT_CLIENT, UNPROTECTED); memset(copy, 0, sizeof(module_data_t)); copy->start = start; copy->end = end; copy->entry_point = entry_point; copy->flags = flags; if (full_path != NULL) copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT)); if (names->module_name != NULL) copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT)); if (names->file_name != NULL) copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT)); # ifdef WINDOWS if (names->exe_name != NULL) copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT)); if (names->rsrc_name != NULL) copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT)); copy->file_version = file_version; copy->product_version = product_version; copy->checksum = checksum; copy->timestamp = timestamp; copy->module_internal_size = mod_size; # else copy->contiguous = contiguous; copy->num_segments = num_segments; copy->segments = (module_segment_data_t *)HEAP_ARRAY_ALLOC( GLOBAL_DCONTEXT, module_segment_data_t, num_segments, ACCT_VMAREAS, 
PROTECTED); if (os_segments != NULL) { ASSERT(segments == NULL); for (i = 0; i < num_segments; i++) { copy->segments[i].start = os_segments[i].start; copy->segments[i].end = os_segments[i].end; copy->segments[i].prot = os_segments[i].prot; copy->segments[i].offset = os_segments[i].offset; } } else { ASSERT(segments != NULL); if (segments != NULL) { memcpy(copy->segments, segments, num_segments * sizeof(module_segment_data_t)); } } copy->timestamp = timestamp; # ifdef MACOS copy->current_version = current_version; copy->compatibility_version = compatibility_version; memcpy(copy->uuid, uuid, sizeof(copy->uuid)); # endif # endif return copy; } module_data_t * copy_module_area_to_module_data(const module_area_t *area) { if (area == NULL) return NULL; return create_and_initialize_module_data( area->start, area->end, area->entry_point, 0, &area->names, area->full_path # ifdef WINDOWS , area->os_data.file_version, area->os_data.product_version, area->os_data.checksum, area->os_data.timestamp, area->os_data.module_internal_size # else , area->os_data.contiguous, area->os_data.num_segments, area->os_data.segments, NULL, area->os_data.timestamp # ifdef MACOS , area->os_data.current_version, area->os_data.compatibility_version, area->os_data.uuid # endif # endif ); } DR_API /* Makes a copy of a module_data_t for returning to the client. We return a copy so * we don't have to hold the module areas list lock while in the client (xref PR 225020). * Note - dr_data is allowed to be NULL. */ module_data_t * dr_copy_module_data(const module_data_t *data) { if (data == NULL) return NULL; return create_and_initialize_module_data( data->start, data->end, data->entry_point, 0, &data->names, data->full_path # ifdef WINDOWS , data->file_version, data->product_version, data->checksum, data->timestamp, data->module_internal_size # else , data->contiguous, data->num_segments, NULL, data->segments, data->timestamp # ifdef MACOS , data->current_version, data->compatibility_version, data->uuid # endif # endif ); } DR_API /* Used to free a module_data_t created by dr_copy_module_data() */ void dr_free_module_data(module_data_t *data) { dcontext_t *dcontext = get_thread_private_dcontext(); if (data == NULL) return; if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) { CLIENT_ASSERT(false, "dr_free_module_data: don\'t free module_data passed to " "the image load or image unload event callbacks."); return; } # ifdef UNIX HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t, data->num_segments, ACCT_VMAREAS, PROTECTED); # endif if (data->full_path != NULL) dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT)); free_module_names(&data->names HEAPACCT(ACCT_CLIENT)); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED); } DR_API bool dr_module_contains_addr(const module_data_t *data, app_pc addr) { /* XXX: this duplicates module_contains_addr(), but we have two different * data structures (module_area_t and module_data_t) so it's hard to share. */ # ifdef WINDOWS return (addr >= data->start && addr < data->end); # else if (data->contiguous) return (addr >= data->start && addr < data->end); else { uint i; for (i = 0; i < data->num_segments; i++) { if (addr >= data->segments[i].start && addr < data->segments[i].end) return true; } } return false; # endif } /* Looks up module containing pc (assumed to be fully loaded). * If it exists and its client module load event has not been called, calls it. 
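 * Takes the module info read lock for the common already-delivered case and
 * upgrades to the write lock (re-checking the flag under it) only when the
 * load event still needs to fire.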
*/ void instrument_module_load_trigger(app_pc pc) { if (CLIENTS_EXIST()) { module_area_t *ma; module_data_t *client_data = NULL; os_get_module_info_lock(); ma = module_pc_lookup(pc); if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) { /* switch to write lock */ os_get_module_info_unlock(); os_get_module_info_write_lock(); ma = module_pc_lookup(pc); if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) { ma->flags |= MODULE_LOAD_EVENT; client_data = copy_module_area_to_module_data(ma); os_get_module_info_write_unlock(); instrument_module_load(client_data, true /*i#884: already loaded*/); dr_free_module_data(client_data); } else os_get_module_info_write_unlock(); } else os_get_module_info_unlock(); } } /* Notify user when a module is loaded */ void instrument_module_load(module_data_t *data, bool previously_loaded) { /* Note - during DR initialization this routine is called before we've set up a * dcontext for the main thread and before we've called instrument_init. It's okay * since there's no way a callback will be registered and we'll return immediately. */ dcontext_t *dcontext; if (module_load_callbacks.num == 0) return; dcontext = get_thread_private_dcontext(); /* client shouldn't delete this */ dcontext->client_data->no_delete_mod_data = data; call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool), (void *)dcontext, data, previously_loaded); dcontext->client_data->no_delete_mod_data = NULL; } /* Notify user when a module is unloaded */ void instrument_module_unload(module_data_t *data) { dcontext_t *dcontext; if (module_unload_callbacks.num == 0) return; dcontext = get_thread_private_dcontext(); /* client shouldn't delete this */ dcontext->client_data->no_delete_mod_data = data; call_all(module_unload_callbacks, int (*)(void *, module_data_t *), (void *)dcontext, data); dcontext->client_data->no_delete_mod_data = NULL; } /* returns whether this sysnum should be intercepted */ bool instrument_filter_syscall(dcontext_t *dcontext, int sysnum) { bool ret = false; /* if client does not filter then we don't intercept anything */ if (filter_syscall_callbacks.num == 0) return ret; /* if any client wants to intercept, then we intercept */ call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int), (void *)dcontext, sysnum); return ret; } /* returns whether this syscall should execute */ bool instrument_pre_syscall(dcontext_t *dcontext, int sysnum) { bool exec = true; dcontext->client_data->in_pre_syscall = true; /* clear flag from dr_syscall_invoke_another() */ dcontext->client_data->invoke_another_syscall = false; if (pre_syscall_callbacks.num > 0) { dr_where_am_i_t old_whereami = dcontext->whereami; dcontext->whereami = DR_WHERE_SYSCALL_HANDLER; DODEBUG({ /* Avoid the common mistake of forgetting a filter event. */ CLIENT_ASSERT(filter_syscall_callbacks.num > 0, "A filter event must be " "provided when using pre- and post-syscall events"); }); /* Skip syscall if any client wants to skip it, but don't short-circuit, * as skipping syscalls is usually done when the effect of the syscall * will be emulated in some other way. The app is typically meant to * think that the syscall succeeded. Thus, other tool components * should see the syscall as well (xref i#424). 
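     * For example (hypothetical client sketch), a client emulating a syscall
     * could return false from its pre-syscall event and install the emulated
     * result via dr_syscall_set_result(), leaving the app unaware that the
     * kernel was never entered.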
*/ call_all_ret(exec, =, &&exec, pre_syscall_callbacks, bool (*)(void *, int), (void *)dcontext, sysnum); dcontext->whereami = old_whereami; } dcontext->client_data->in_pre_syscall = false; return exec; } void instrument_post_syscall(dcontext_t *dcontext, int sysnum) { dr_where_am_i_t old_whereami = dcontext->whereami; if (post_syscall_callbacks.num == 0) return; DODEBUG({ /* Avoid the common mistake of forgetting a filter event. */ CLIENT_ASSERT(filter_syscall_callbacks.num > 0, "A filter event must be " "provided when using pre- and post-syscall events"); }); dcontext->whereami = DR_WHERE_SYSCALL_HANDLER; dcontext->client_data->in_post_syscall = true; call_all(post_syscall_callbacks, int (*)(void *, int), (void *)dcontext, sysnum); dcontext->client_data->in_post_syscall = false; dcontext->whereami = old_whereami; } bool instrument_invoke_another_syscall(dcontext_t *dcontext) { return dcontext->client_data->invoke_another_syscall; } bool instrument_kernel_xfer(dcontext_t *dcontext, dr_kernel_xfer_type_t type, os_cxt_ptr_t source_os_cxt, dr_mcontext_t *source_dmc, priv_mcontext_t *source_mc, app_pc target_pc, reg_t target_xsp, os_cxt_ptr_t target_os_cxt, priv_mcontext_t *target_mc, int sig) { if (kernel_xfer_callbacks.num == 0) { return false; } dr_kernel_xfer_info_t info; info.type = type; info.source_mcontext = NULL; info.target_pc = target_pc; info.target_xsp = target_xsp; info.sig = sig; dr_mcontext_t dr_mcontext; dr_mcontext.size = sizeof(dr_mcontext); dr_mcontext.flags = DR_MC_CONTROL | DR_MC_INTEGER; if (source_dmc != NULL) info.source_mcontext = source_dmc; else if (source_mc != NULL) { if (priv_mcontext_to_dr_mcontext(&dr_mcontext, source_mc)) info.source_mcontext = &dr_mcontext; } else if (!is_os_cxt_ptr_null(source_os_cxt)) { if (os_context_to_mcontext(&dr_mcontext, NULL, source_os_cxt)) info.source_mcontext = &dr_mcontext; } /* Our compromise to reduce context copying is to provide the PC and XSP inline, * and only get more if the user calls dr_get_mcontext(), which we support again * without any copying if not used by taking in a raw os_context_t. */ dcontext->client_data->os_cxt = target_os_cxt; dcontext->client_data->cur_mc = target_mc; call_all(kernel_xfer_callbacks, int (*)(void *, const dr_kernel_xfer_info_t *), (void *)dcontext, &info); set_os_cxt_ptr_null(&dcontext->client_data->os_cxt); dcontext->client_data->cur_mc = NULL; return true; } # ifdef WINDOWS /* Notify user of exceptions. Note: not called for RaiseException */ bool instrument_exception(dcontext_t *dcontext, dr_exception_t *exception) { bool res = true; /* Ensure that dr_get_mcontext() called from instrument_kernel_xfer() from * dr_redirect_execution() will get the source context. * cur_mc will later be clobbered by instrument_kernel_xfer() which is ok: * the redirect ends the callback calling. */ dcontext->client_data->cur_mc = dr_mcontext_as_priv_mcontext(exception->mcontext); /* We short-circuit if any client wants to "own" the fault and not pass on. * This does violate the "priority order" of events where the last one is * supposed to have final say b/c it won't even see the event: but only one * registrant should own it (xref i#424). 
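     * (E.g., a client that fully consumes the fault -- say, by repairing the
     * faulting state or redirecting via dr_redirect_execution() -- returns
     * false so that neither the remaining clients nor the app's handler
     * sees it.)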
*/ call_all_ret(res, = res &&, , exception_callbacks, bool (*)(void *, dr_exception_t *), (void *)dcontext, exception); dcontext->client_data->cur_mc = NULL; return res; } # else dr_signal_action_t instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo) { dr_signal_action_t ret = DR_SIGNAL_DELIVER; /* We short-circuit if any client wants to do other than deliver to the app. * This does violate the "priority order" of events where the last one is * supposed to have final say b/c it won't even see the event: but only one * registrant should own the signal (xref i#424). */ call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ?, : ret, signal_callbacks, dr_signal_action_t(*)(void *, dr_siginfo_t *), (void *)dcontext, siginfo); return ret; } bool dr_signal_hook_exists(void) { return (signal_callbacks.num > 0); } # endif /* WINDOWS */ # ifdef PROGRAM_SHEPHERDING /* Notify user when a security violation is detected */ void instrument_security_violation(dcontext_t *dcontext, app_pc target_pc, security_violation_t violation, action_type_t *action) { dr_security_violation_type_t dr_violation; dr_security_violation_action_t dr_action, dr_action_original; app_pc source_pc = NULL; fragment_t *last; dr_mcontext_t dr_mcontext; dr_mcontext_init(&dr_mcontext); if (security_violation_callbacks.num == 0) return; if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext))) return; /* FIXME - the source_tag, source_pc, and context can all be incorrect if the * violation ends up occurring in the middle of a bb we're building. See case * 7380 which we should fix in interp.c. */ /* Obtain the source addr to pass to the client. xref case 285 -- * we're using the more heavy-weight solution 2) here, but that * should be okay since we already have the overhead of calling * into the client. */ last = dcontext->last_fragment; if (!TEST(FRAG_FAKE, last->flags)) { cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit); source_pc = recreate_app_pc(dcontext, pc, last); } /* FIXME - set pc field of dr_mcontext_t. We'll probably want it * for thread start and possibly apc/callback events as well. */ switch (violation) { case STACK_EXECUTION_VIOLATION: dr_violation = DR_RCO_STACK_VIOLATION; break; case HEAP_EXECUTION_VIOLATION: dr_violation = DR_RCO_HEAP_VIOLATION; break; case RETURN_TARGET_VIOLATION: dr_violation = DR_RCT_RETURN_VIOLATION; break; case RETURN_DIRECT_RCT_VIOLATION: ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */ dr_violation = DR_UNKNOWN_VIOLATION; break; case INDIRECT_CALL_RCT_VIOLATION: dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION; break; case INDIRECT_JUMP_RCT_VIOLATION: dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION; break; default: ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */ dr_violation = DR_UNKNOWN_VIOLATION; break; } switch (*action) { case ACTION_TERMINATE_PROCESS: dr_action = DR_VIOLATION_ACTION_KILL_PROCESS; break; case ACTION_CONTINUE: dr_action = DR_VIOLATION_ACTION_CONTINUE; break; case ACTION_TERMINATE_THREAD: dr_action = DR_VIOLATION_ACTION_KILL_THREAD; break; case ACTION_THROW_EXCEPTION: dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION; break; default: ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */ dr_action = DR_VIOLATION_ACTION_CONTINUE; break; } dr_action_original = dr_action; /* NOTE - last->tag should be valid here (even if the frag is fake since the * coarse wrappers set the tag). FIXME - for traces we really want the bb tag not * the trace tag, should get that. 
Of course the only real reason we pass source * tag is because we can't always give a valid source_pc. */ /* Note that the last registered function gets the final crack at * changing the action. */ call_all(security_violation_callbacks, int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t, dr_mcontext_t *, dr_security_violation_action_t *), (void *)dcontext, last->tag, source_pc, target_pc, dr_violation, &dr_mcontext, &dr_action); if (dr_action != dr_action_original) { switch (dr_action) { case DR_VIOLATION_ACTION_KILL_PROCESS: *action = ACTION_TERMINATE_PROCESS; break; case DR_VIOLATION_ACTION_KILL_THREAD: *action = ACTION_TERMINATE_THREAD; break; case DR_VIOLATION_ACTION_THROW_EXCEPTION: *action = ACTION_THROW_EXCEPTION; break; case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT: /* FIXME - not safe to implement till case 7380 is fixed. */ CLIENT_ASSERT(false, "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT " "not yet supported."); /* note - no break, fall through */ case DR_VIOLATION_ACTION_CONTINUE: *action = ACTION_CONTINUE; break; default: CLIENT_ASSERT(false, "Security violation event callback returned invalid " "action value."); } } } # endif /* Notify the client of a nudge. */ void instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg) { size_t i; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT && dcontext == get_thread_private_dcontext()); /* synch_with_all_threads and flush API assume that client nudge threads * hold no dr locks and are !couldbelinking while in client lib code */ ASSERT_OWN_NO_LOCKS(); ASSERT(!is_couldbelinking(dcontext)); /* find the client the nudge is intended for */ for (i = 0; i < num_client_libs; i++) { /* until we have nudge-arg support (PR 477454), nudges target the 1st client */ if (IF_VMX86_ELSE(true, client_libs[i].id == id)) { break; } } if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0) return; # ifdef WINDOWS /* count the number of nudge events so we can make sure they're * all finished before exiting */ d_r_mutex_lock(&client_thread_count_lock); if (block_client_nudge_threads) { /* FIXME - would be nice if there was a way to let the external agent know that * the nudge event wasn't delivered (but this only happens when the process * is detaching or exiting). */ d_r_mutex_unlock(&client_thread_count_lock); return; } /* atomic to avoid locking around the dec */ ATOMIC_INC(int, num_client_nudge_threads); d_r_mutex_unlock(&client_thread_count_lock); /* We need to mark this as a client controlled thread for synch_with_all_threads * and otherwise treat it as native. Xref PR 230836 on what to do if this * thread hits native_exec_syscalls hooks. * XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD * in get_stack_bounds() and instrument_thread_exit_event(): maybe better * to have synchall checks do extra checks and have IS_CLIENT_THREAD be * false for nudge threads at exit time? */ dcontext->client_data->is_client_thread = true; dcontext->thread_record->under_dynamo_control = false; # else /* support calling dr_get_mcontext() on this thread. the app * context should be intact in the current mcontext except * pc which we set from next_tag. 
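     * (I.e., on UNIX a nudge callback may call dr_get_mcontext() and observe
     * the interrupted app state, with pc patched from next_tag just below.)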
*/ CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext, "internal inconsistency in where mcontext is"); dcontext->client_data->mcontext_in_dcontext = true; /* officially get_mcontext() doesn't always set pc: we do anyway */ get_mcontext(dcontext)->pc = dcontext->next_tag; # endif call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64), (void *)dcontext, arg); # ifdef UNIX dcontext->client_data->mcontext_in_dcontext = false; # else dcontext->thread_record->under_dynamo_control = true; dcontext->client_data->is_client_thread = false; ATOMIC_DEC(int, num_client_nudge_threads); # endif } int get_num_client_threads(void) { int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0); # ifdef CLIENT_SIDELINE num += num_client_sideline_threads; # endif return num; } # ifdef WINDOWS /* wait for all nudges to finish */ void wait_for_outstanding_nudges() { /* block any new nudge threads from starting */ d_r_mutex_lock(&client_thread_count_lock); SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); block_client_nudge_threads = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); DOLOG(1, LOG_TOP, { if (num_client_nudge_threads > 0) { LOG(GLOBAL, LOG_TOP, 1, "Waiting for %d nudges to finish - app is about to kill all threads " "except the current one.\n", num_client_nudge_threads); } }); /* don't wait if the client requested exit: after all the client might * have done so from a nudge, and if the client does want to exit it's * its own problem if it misses nudges (and external nudgers should use * a finite timeout) */ if (client_requested_exit) { d_r_mutex_unlock(&client_thread_count_lock); return; } while (num_client_nudge_threads > 0) { /* yield with lock released to allow nudges to finish */ d_r_mutex_unlock(&client_thread_count_lock); dr_thread_yield(); d_r_mutex_lock(&client_thread_count_lock); } d_r_mutex_unlock(&client_thread_count_lock); } # endif /* WINDOWS */ /****************************************************************************/ /* EXPORTED ROUTINES */ DR_API /* Creates a DR context that can be used in a standalone program. * WARNING: this context cannot be used as the drcontext for a thread * running under DR control! It is only for standalone programs that * wish to use DR as a library of disassembly, etc. routines. */ void * dr_standalone_init(void) { dcontext_t *dcontext = standalone_init(); return (void *)dcontext; } DR_API void dr_standalone_exit(void) { standalone_exit(); } DR_API /* Aborts the process immediately */ void dr_abort(void) { if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask)) os_dump_core("dr_abort"); os_terminate(NULL, TERMINATE_PROCESS); } DR_API void dr_abort_with_code(int exit_code) { if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask)) os_dump_core("dr_abort"); os_terminate_with_code(NULL, TERMINATE_PROCESS, exit_code); } DR_API void dr_exit_process(int exit_code) { dcontext_t *dcontext = get_thread_private_dcontext(); SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); /* Prevent cleanup from waiting for nudges as this may be called * from a nudge! * Also suppress leak asserts, as it's hard to clean up from * some situations (such as DrMem -crash_at_error). 
*/ client_requested_exit = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); # ifdef WINDOWS if (dcontext != NULL && dcontext->nudge_target != NULL) { /* we need to free the nudge thread stack which may involved * switching stacks so we have the nudge thread invoke * os_terminate for us */ nudge_thread_cleanup(dcontext, true /*kill process*/, exit_code); CLIENT_ASSERT(false, "shouldn't get here"); } # endif if (!is_currently_on_dstack(dcontext) IF_UNIX(&&!is_currently_on_sigaltstack(dcontext))) { /* if on app stack or sigaltstack, avoid incorrect leak assert at exit */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); dr_api_exit = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */ } os_terminate_with_code(dcontext, /* dcontext is required */ TERMINATE_CLEANUP | TERMINATE_PROCESS, exit_code); CLIENT_ASSERT(false, "shouldn't get here"); } DR_API bool dr_create_memory_dump(dr_memory_dump_spec_t *spec) { if (spec->size != sizeof(dr_memory_dump_spec_t)) return false; # ifdef WINDOWS if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags)) return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size); # endif return false; } DR_API /* Returns true if all DynamoRIO caches are thread private. */ bool dr_using_all_private_caches(void) { return !SHARED_FRAGMENTS_ENABLED(); } DR_API void dr_request_synchronized_exit(void) { SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: " "use dr_set_process_exit_behavior instead"); } DR_API void dr_set_process_exit_behavior(dr_exit_flags_t flags) { if ((!DYNAMO_OPTION(multi_thread_exit) && TEST(DR_EXIT_MULTI_THREAD, flags)) || (DYNAMO_OPTION(multi_thread_exit) && !TEST(DR_EXIT_MULTI_THREAD, flags))) { options_make_writable(); dynamo_options.multi_thread_exit = TEST(DR_EXIT_MULTI_THREAD, flags); options_restore_readonly(); } if ((!DYNAMO_OPTION(skip_thread_exit_at_exit) && TEST(DR_EXIT_SKIP_THREAD_EXIT, flags)) || (DYNAMO_OPTION(skip_thread_exit_at_exit) && !TEST(DR_EXIT_SKIP_THREAD_EXIT, flags))) { options_make_writable(); dynamo_options.skip_thread_exit_at_exit = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags); options_restore_readonly(); } } void dr_allow_unsafe_static_behavior(void) { loader_allow_unsafe_static_behavior(); } DR_API /* Returns the option string passed along with a client path via DR's * -client_lib option. */ /* i#1736: we now token-delimit with quotes, but for backward compat we need to * pass a version w/o quotes for dr_get_options(). */ const char * dr_get_options(client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { /* If we already converted, pass the result */ if (client_libs[i].legacy_options[0] != '\0' || client_libs[i].options[0] == '\0') return client_libs[i].legacy_options; /* For backward compatibility, we need to remove the token-delimiting * quotes. We tokenize, and then re-assemble the flat string. * i#1755: however, for legacy custom frontends that are not re-quoting * like drrun now is, we need to avoid removing any quotes from the * original strings. We try to detect this by assuming a frontend will * either re-quote everything or nothing. Ideally we would check all * args, but that would require plumbing info from getword() or * duplicating its functionality: so instead our heuristic is just checking * the first and last chars. 
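     * E.g. (illustrative values only): a re-quoting frontend hands us
     *   "-logdir" "/tmp/logs"
     * where both the first and last characters are quotes, while a legacy
     * frontend hands us the bare -logdir /tmp/logs and fails this check.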
*/ if (!char_is_quote(client_libs[i].options[0]) || /* Empty string already detected above */ !char_is_quote( client_libs[i].options[strlen(client_libs[i].options) - 1])) { /* At least one arg is not quoted => better use original */ snprintf(client_libs[i].legacy_options, BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options), "%s", client_libs[i].options); } else { int j; size_t sofar = 0; for (j = 1 /*skip client lib*/; j < client_libs[i].argc; j++) { if (!print_to_buffer( client_libs[i].legacy_options, BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options), &sofar, "%s%s", (j == 1) ? "" : " ", client_libs[i].argv[j])) break; } } NULL_TERMINATE_BUFFER(client_libs[i].legacy_options); return client_libs[i].legacy_options; } } CLIENT_ASSERT(false, "dr_get_options(): invalid client id"); return NULL; } DR_API bool dr_get_option_array(client_id_t id, int *argc OUT, const char ***argv OUT) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { *argc = client_libs[i].argc; *argv = client_libs[i].argv; return true; } } CLIENT_ASSERT(false, "dr_get_option_array(): invalid client id"); return false; } DR_API /* Returns the path to the client library. Client must pass its ID */ const char * dr_get_client_path(client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { return client_libs[i].path; } } CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id"); return NULL; } DR_API byte * dr_get_client_base(client_id_t id) { size_t i; for (i = 0; i < num_client_libs; i++) { if (client_libs[i].id == id) { return client_libs[i].start; } } CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id"); return NULL; } DR_API bool dr_set_client_name(const char *name, const char *report_URL) { /* Although set_exception_strings() accepts NULL, clients should pass real vals. */ if (name == NULL || report_URL == NULL) return false; set_exception_strings(name, report_URL); return true; } bool dr_set_client_version_string(const char *version) { if (version == NULL) return false; set_display_version(version); return true; } DR_API const char * dr_get_application_name(void) { # ifdef UNIX return get_application_short_name(); # else return get_application_short_unqualified_name(); # endif } DR_API process_id_t dr_get_process_id(void) { return (process_id_t)get_process_id(); } # ifdef UNIX DR_API process_id_t dr_get_parent_id(void) { return get_parent_id(); } # endif # ifdef WINDOWS DR_API process_id_t dr_convert_handle_to_pid(HANDLE process_handle) { ASSERT(POINTER_MAX == INVALID_PROCESS_ID); return process_id_from_handle(process_handle); } DR_API HANDLE dr_convert_pid_to_handle(process_id_t pid) { return process_handle_from_id(pid); } DR_API /** * Returns information about the version of the operating system. * Returns whether successful.
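 *
 * A minimal usage sketch (the caller must initialize the size field, which
 * this routine uses to decide how much of the struct it may write):
 *
 *   dr_os_version_info_t info = { 0 };
 *   info.size = sizeof(info);
 *   if (dr_get_os_version(&info) && info.version >= DR_WINDOWS_VERSION_10)
 *       ... version-specific handling ...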
*/ bool dr_get_os_version(dr_os_version_info_t *info) { int ver; uint sp_major, sp_minor, build_number; const char *release_id, *edition; get_os_version_ex(&ver, &sp_major, &sp_minor, &build_number, &release_id, &edition); if (info->size > offsetof(dr_os_version_info_t, version)) { switch (ver) { case WINDOWS_VERSION_10_1803: info->version = DR_WINDOWS_VERSION_10_1803; break; case WINDOWS_VERSION_10_1709: info->version = DR_WINDOWS_VERSION_10_1709; break; case WINDOWS_VERSION_10_1703: info->version = DR_WINDOWS_VERSION_10_1703; break; case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break; case WINDOWS_VERSION_10_1511: info->version = DR_WINDOWS_VERSION_10_1511; break; case WINDOWS_VERSION_10: info->version = DR_WINDOWS_VERSION_10; break; case WINDOWS_VERSION_8_1: info->version = DR_WINDOWS_VERSION_8_1; break; case WINDOWS_VERSION_8: info->version = DR_WINDOWS_VERSION_8; break; case WINDOWS_VERSION_7: info->version = DR_WINDOWS_VERSION_7; break; case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break; case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break; case WINDOWS_VERSION_XP: info->version = DR_WINDOWS_VERSION_XP; break; case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break; case WINDOWS_VERSION_NT: info->version = DR_WINDOWS_VERSION_NT; break; default: CLIENT_ASSERT(false, "unsupported windows version"); }; } else return false; /* struct too small for any info */ if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) { info->service_pack_major = sp_major; if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) { info->service_pack_minor = sp_minor; } } if (info->size > offsetof(dr_os_version_info_t, build_number)) { info->build_number = build_number; } if (info->size > offsetof(dr_os_version_info_t, release_id)) { dr_snprintf(info->release_id, BUFFER_SIZE_ELEMENTS(info->release_id), "%s", release_id); NULL_TERMINATE_BUFFER(info->release_id); } if (info->size > offsetof(dr_os_version_info_t, edition)) { dr_snprintf(info->edition, BUFFER_SIZE_ELEMENTS(info->edition), "%s", edition); NULL_TERMINATE_BUFFER(info->edition); } return true; } DR_API bool dr_is_wow64(void) { return is_wow64_process(NT_CURRENT_PROCESS); } DR_API void * dr_get_app_PEB(void) { return get_own_peb(); } # endif DR_API /* Retrieves the current time */ void dr_get_time(dr_time_t *time) { convert_millis_to_date(query_time_millis(), time); } DR_API uint64 dr_get_milliseconds(void) { return query_time_millis(); } DR_API uint64 dr_get_microseconds(void) { return query_time_micros(); } DR_API uint dr_get_random_value(uint max) { return (uint)get_random_offset(max); } DR_API void dr_set_random_seed(uint seed) { d_r_set_random_seed(seed); } DR_API uint dr_get_random_seed(void) { return d_r_get_random_seed(); } /*************************************************************************** * MEMORY ALLOCATION */ DR_API /* Allocates memory from DR's memory pool specific to the * thread associated with drcontext. */ void * dr_thread_alloc(void *drcontext, size_t size) { dcontext_t *dcontext = (dcontext_t *)drcontext; /* For back-compat this is guaranteed-reachable. */ return heap_reachable_alloc(dcontext, size HEAPACCT(ACCT_CLIENT)); } DR_API /* Frees thread-specific memory allocated by dr_thread_alloc. * size must be the same size passed to dr_thread_alloc. 
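 * E.g. (sketch; per_thread_t is a hypothetical client type): memory obtained
 * in a thread-init event with
 *   data = dr_thread_alloc(drcontext, sizeof(per_thread_t));
 * must later be released with the identical size:
 *   dr_thread_free(drcontext, data, sizeof(per_thread_t));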
*/ void dr_thread_free(void *drcontext, void *mem, size_t size) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_thread_free: drcontext is invalid"); heap_reachable_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT)); } DR_API /* Allocates memory from DR's global memory pool. */ void * dr_global_alloc(size_t size) { /* For back-compat this is guaranteed-reachable. */ return heap_reachable_alloc(GLOBAL_DCONTEXT, size HEAPACCT(ACCT_CLIENT)); } DR_API /* Frees memory allocated by dr_global_alloc. * size must be the same size passed to dr_global_alloc. */ void dr_global_free(void *mem, size_t size) { heap_reachable_free(GLOBAL_DCONTEXT, mem, size HEAPACCT(ACCT_CLIENT)); } DR_API /* PR 352427: API routine to allocate executable memory */ void * dr_nonheap_alloc(size_t size, uint prot) { CLIENT_ASSERT( !TESTALL(DR_MEMPROT_WRITE | DR_MEMPROT_EXEC, prot) || !DYNAMO_OPTION(satisfy_w_xor_x), "reachable executable client memory is not supported with -satisfy_w_xor_x"); return heap_mmap_ex(size, size, prot, false /*no guard pages*/, /* For back-compat we preserve reachability. */ VMM_SPECIAL_MMAP | VMM_REACHABLE); } DR_API void dr_nonheap_free(void *mem, size_t size) { heap_munmap_ex(mem, size, false /*no guard pages*/, VMM_SPECIAL_MMAP | VMM_REACHABLE); } static void * raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags) { byte *p; heap_error_code_t error_code; CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned"); if (!TEST(DR_ALLOC_NON_DR, flags)) { /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ } addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE); size = ALIGN_FORWARD(size, PAGE_SIZE); # ifdef WINDOWS if (TEST(DR_ALLOC_LOW_2GB, flags)) { CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags), "cannot combine commit-only and low-2GB"); p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size, &error_code, TEST(DR_MEMPROT_EXEC, flags)); if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) { if (!os_heap_commit(p, size, prot, &error_code)) { os_heap_free(p, size, &error_code); p = NULL; } } } else # endif { /* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's * ok that the Linux kernel will ignore MAP_32BIT for 32-bit. */ # ifdef UNIX uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0; # else uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY : (TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0); # endif if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&) addr != NULL && !app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot, false)) p = NULL; else p = os_raw_mem_alloc(addr, size, prot, os_flags, &error_code); } if (p != NULL) { if (TEST(DR_ALLOC_NON_DR, flags)) { all_memory_areas_lock(); update_all_memory_areas(p, p + size, prot, DR_MEMTYPE_DATA); all_memory_areas_unlock(); } else { /* this routine updates allmem for us: */ add_dynamo_vm_area((app_pc)p, ((app_pc)p) + size, prot, true _IF_DEBUG("fls cb in private lib")); } RSTATS_ADD_PEAK(client_raw_mmap_size, size); } if (!TEST(DR_ALLOC_NON_DR, flags)) dynamo_vm_areas_unlock(); return p; } static bool raw_mem_free(void *addr, size_t size, dr_alloc_flags_t flags) { bool res; heap_error_code_t error_code; byte *p = addr; # ifdef UNIX uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? 
RAW_ALLOC_32BIT : 0; # else uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY : (TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0); # endif size = ALIGN_FORWARD(size, PAGE_SIZE); if (TEST(DR_ALLOC_NON_DR, flags)) { /* use lock to avoid racy update on parallel memory allocation, * e.g. allocation from another thread at p happens after os_heap_free * but before remove_from_all_memory_areas */ all_memory_areas_lock(); } else { /* memory alloc/dealloc and updating DR list must be atomic */ dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ } res = os_raw_mem_free(p, size, os_flags, &error_code); if (TEST(DR_ALLOC_NON_DR, flags)) { remove_from_all_memory_areas(p, p + size); all_memory_areas_unlock(); } else { /* this routine updates allmem for us: */ remove_dynamo_vm_area((app_pc)addr, ((app_pc)addr) + size); } if (!TEST(DR_ALLOC_NON_DR, flags)) dynamo_vm_areas_unlock(); if (res) RSTATS_SUB(client_raw_mmap_size, size); return res; } DR_API void * dr_raw_mem_alloc(size_t size, uint prot, void *addr) { return raw_mem_alloc(size, prot, addr, DR_ALLOC_NON_DR); } DR_API bool dr_raw_mem_free(void *addr, size_t size) { return raw_mem_free(addr, size, DR_ALLOC_NON_DR); } static void * custom_memory_shared(bool alloc, void *drcontext, dr_alloc_flags_t flags, size_t size, uint prot, void *addr, bool *free_res) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(alloc || free_res != NULL, "must ask for free_res on free"); CLIENT_ASSERT(alloc || addr != NULL, "cannot free NULL"); CLIENT_ASSERT(!TESTALL(DR_ALLOC_NON_DR | DR_ALLOC_CACHE_REACHABLE, flags), "dr_custom_alloc: cannot combine non-DR and cache-reachable"); CLIENT_ASSERT(!alloc || TEST(DR_ALLOC_FIXED_LOCATION, flags) || addr == NULL, "dr_custom_alloc: address only honored for fixed location"); # ifdef WINDOWS CLIENT_ASSERT(!TESTANY(DR_ALLOC_RESERVE_ONLY | DR_ALLOC_COMMIT_ONLY, flags) || TESTALL(DR_ALLOC_NON_HEAP | DR_ALLOC_NON_DR, flags), "dr_custom_alloc: reserve/commit-only are only for non-DR non-heap"); CLIENT_ASSERT(!TEST(DR_ALLOC_RESERVE_ONLY, flags) || !TEST(DR_ALLOC_COMMIT_ONLY, flags), "dr_custom_alloc: cannot combine reserve-only + commit-only"); # endif if (TEST(DR_ALLOC_NON_HEAP, flags)) { CLIENT_ASSERT(drcontext == NULL, "dr_custom_alloc: drcontext must be NULL for non-heap"); CLIENT_ASSERT(!TEST(DR_ALLOC_THREAD_PRIVATE, flags), "dr_custom_alloc: non-heap cannot be thread-private"); CLIENT_ASSERT(!TESTALL(DR_ALLOC_CACHE_REACHABLE | DR_ALLOC_LOW_2GB, flags), "dr_custom_alloc: cannot combine low-2GB and cache-reachable"); # ifdef WINDOWS CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_COMMIT_ONLY, flags), "dr_custom_alloc: commit-only requires non-NULL addr"); # endif if (TEST(DR_ALLOC_LOW_2GB, flags)) { # ifdef WINDOWS CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags), "dr_custom_alloc: cannot combine commit-only and low-2GB"); # endif CLIENT_ASSERT(!alloc || addr == NULL, "dr_custom_alloc: cannot pass an addr with low-2GB"); /* Even if not non-DR, easier to allocate via raw */ if (alloc) return raw_mem_alloc(size, prot, addr, flags); else *free_res = raw_mem_free(addr, size, flags); } else if (TEST(DR_ALLOC_NON_DR, flags)) { /* ok for addr to be NULL */ if (alloc) return raw_mem_alloc(size, prot, addr, flags); else *free_res = raw_mem_free(addr, size, flags); } else { /* including DR_ALLOC_CACHE_REACHABLE */ CLIENT_ASSERT(!alloc || !TEST(DR_ALLOC_CACHE_REACHABLE, flags) || addr == NULL, "dr_custom_alloc: cannot ask for addr and cache-reachable"); /* This flag is here solely 
so we know which version of free to call */ if (TEST(DR_ALLOC_FIXED_LOCATION, flags) || !TEST(DR_ALLOC_CACHE_REACHABLE, flags)) { CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_FIXED_LOCATION, flags), "dr_custom_alloc: fixed location requires an address"); if (alloc) return raw_mem_alloc(size, prot, addr, 0); else *free_res = raw_mem_free(addr, size, 0); } else { if (alloc) return dr_nonheap_alloc(size, prot); else { *free_res = true; dr_nonheap_free(addr, size); } } } } else { if (!alloc) *free_res = true; CLIENT_ASSERT(!alloc || addr == NULL, "dr_custom_alloc: cannot pass an addr for heap memory"); CLIENT_ASSERT(drcontext == NULL || TEST(DR_ALLOC_THREAD_PRIVATE, flags), "dr_custom_alloc: drcontext must be NULL for global heap"); CLIENT_ASSERT(!TEST(DR_ALLOC_LOW_2GB, flags), "dr_custom_alloc: cannot ask for heap in low 2GB"); CLIENT_ASSERT(!TEST(DR_ALLOC_NON_DR, flags), "dr_custom_alloc: cannot ask for non-DR heap memory"); if (TEST(DR_ALLOC_CACHE_REACHABLE, flags)) { if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) { if (alloc) return dr_thread_alloc(drcontext, size); else dr_thread_free(drcontext, addr, size); } else { if (alloc) return dr_global_alloc(size); else dr_global_free(addr, size); } } else { if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) { if (alloc) return heap_alloc(dcontext, size HEAPACCT(ACCT_CLIENT)); else heap_free(dcontext, addr, size HEAPACCT(ACCT_CLIENT)); } else { if (alloc) return global_heap_alloc(size HEAPACCT(ACCT_CLIENT)); else global_heap_free(addr, size HEAPACCT(ACCT_CLIENT)); } } } return NULL; } DR_API void * dr_custom_alloc(void *drcontext, dr_alloc_flags_t flags, size_t size, uint prot, void *addr) { return custom_memory_shared(true, drcontext, flags, size, prot, addr, NULL); } DR_API bool dr_custom_free(void *drcontext, dr_alloc_flags_t flags, void *addr, size_t size) { bool res; custom_memory_shared(false, drcontext, flags, size, 0, addr, &res); return res; } # ifdef UNIX DR_API /* With ld's -wrap option, we can supply a replacement for malloc. * This routine allocates memory from DR's global memory pool. Unlike * dr_global_alloc(), however, we store the size of the allocation in * the first few bytes so __wrap_free() can retrieve it. */ void * __wrap_malloc(size_t size) { return redirect_malloc(size); } DR_API /* With ld's -wrap option, we can supply a replacement for realloc. * This routine allocates memory from DR's global memory pool. Unlike * dr_global_alloc(), however, we store the size of the allocation in * the first few bytes so __wrap_free() can retrieve it. */ void * __wrap_realloc(void *mem, size_t size) { return redirect_realloc(mem, size); } DR_API /* With ld's -wrap option, we can supply a replacement for calloc. * This routine allocates memory from DR's global memory pool. Unlike * dr_global_alloc(), however, we store the size of the allocation in * the first few bytes so __wrap_free() can retrieve it. */ void * __wrap_calloc(size_t nmemb, size_t size) { return redirect_calloc(nmemb, size); } DR_API /* With ld's -wrap option, we can supply a replacement for free. This * routine frees memory allocated by __wrap_alloc and expects the * allocation size to be available in the few bytes before 'mem'. */ void __wrap_free(void *mem) { redirect_free(mem); } # endif DR_API bool dr_memory_protect(void *base, size_t size, uint new_prot) { /* We do allow the client to modify DR memory, for allocating a * region and later making it unwritable. 
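     * (E.g., a hypothetical client could dr_nonheap_alloc() a writable
     * scratch region, fill it in, and then dr_memory_protect() it down to
     * DR_MEMPROT_READ.)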
We should probably * allow modifying ntdll, since our general model is to trust the * client and let it shoot itself in the foot, but that would require * passing in extra args to app_memory_protection_change() to ignore * the patch_proof_list: and maybe it is safer to disallow client * from putting hooks in ntdll. */ CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); if (!dynamo_vm_area_overlap(base, ((byte *)base) + size)) { uint mod_prot = new_prot; uint res = app_memory_protection_change(get_thread_private_dcontext(), base, size, new_prot, &mod_prot, NULL); if (res != DO_APP_MEM_PROT_CHANGE) { if (res == FAIL_APP_MEM_PROT_CHANGE || res == PRETEND_APP_MEM_PROT_CHANGE) { return false; } else { /* SUBSET_APP_MEM_PROT_CHANGE should only happen for * PROGRAM_SHEPHERDING. FIXME: not sure how common * this will be: for now we just fail. */ return false; } } CLIENT_ASSERT(mod_prot == new_prot, "internal error on dr_memory_protect()"); } return set_protection(base, size, new_prot); } DR_API size_t dr_page_size(void) { return os_page_size(); } DR_API /* Checks to see that all bytes with addresses from pc to pc+size-1 * are readable and that reading from there won't generate an exception. */ bool dr_memory_is_readable(const byte *pc, size_t size) { return is_readable_without_exception(pc, size); } DR_API /* OS-neutral memory query for clients, just a wrapper around our get_memory_info(). */ bool dr_query_memory(const byte *pc, byte **base_pc, size_t *size, uint *prot) { uint real_prot; bool res; # if defined(UNIX) && defined(HAVE_MEMINFO) /* xref PR 246897 - the cached all memory list can have problems when * out-of-process entities change the mappings. For now we use the from-os * version instead (even though it's slower, and only if we have * HAVE_MEMINFO_MAPS support). FIXME * XXX i#853: We could decide allmem vs os with the use_all_memory_areas * option. */ res = get_memory_info_from_os(pc, base_pc, size, &real_prot); # else res = get_memory_info(pc, base_pc, size, &real_prot); # endif if (prot != NULL) { if (is_pretend_or_executable_writable((app_pc)pc)) { /* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod * as executable-but-writable and we'll come here. */ real_prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE; } *prot = real_prot; } return res; } DR_API bool dr_query_memory_ex(const byte *pc, OUT dr_mem_info_t *info) { bool res; # if defined(UNIX) && defined(HAVE_MEMINFO) /* PR 246897: all_memory_areas not ready for prime time */ res = query_memory_ex_from_os(pc, info); # else res = query_memory_ex(pc, info); # endif if (is_pretend_or_executable_writable((app_pc)pc)) { /* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod * as executable-but-writable and we'll come here. */ info->prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE; } return res; } DR_API /* Wrapper around our safe_read. Xref P4 198875, placeholder till we have try/except */ bool dr_safe_read(const void *base, size_t size, void *out_buf, size_t *bytes_read) { return safe_read_ex(base, size, out_buf, bytes_read); } DR_API /* Wrapper around our safe_write.
Xref P4 198875, placeholder till we have try/except */ bool dr_safe_write(void *base, size_t size, const void *in_buf, size_t *bytes_written) { return safe_write_ex(base, size, in_buf, bytes_written); } DR_API void dr_try_setup(void *drcontext, void **try_cxt) { /* Yes we're duplicating the code from the TRY() macro but this * provides better abstraction and lets us change our impl later * vs exposing that macro */ dcontext_t *dcontext = (dcontext_t *)drcontext; try_except_context_t *try_state; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext()); ASSERT(try_cxt != NULL); /* We allocate on the heap to avoid having to expose the try_except_context_t * and dr_jmp_buf_t structs and be tied to their exact layouts. * The client is likely to allocate memory inside the try anyway * if doing a decode or something. */ try_state = (try_except_context_t *)HEAP_TYPE_ALLOC(dcontext, try_except_context_t, ACCT_CLIENT, PROTECTED); *try_cxt = try_state; try_state->prev_context = dcontext->try_except.try_except_state; dcontext->try_except.try_except_state = try_state; } /* dr_try_start() is in x86.asm since we can't have an extra frame that's * going to be torn down between the longjmp and the restore point */ DR_API void dr_try_stop(void *drcontext, void *try_cxt) { dcontext_t *dcontext = (dcontext_t *)drcontext; try_except_context_t *try_state = (try_except_context_t *)try_cxt; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext()); ASSERT(try_state != NULL); POP_TRY_BLOCK(&dcontext->try_except, *try_state); HEAP_TYPE_FREE(dcontext, try_state, try_except_context_t, ACCT_CLIENT, PROTECTED); } DR_API bool dr_memory_is_dr_internal(const byte *pc) { return is_dynamo_address((app_pc)pc); } DR_API bool dr_memory_is_in_client(const byte *pc) { return is_in_client_lib((app_pc)pc); } void instrument_client_lib_loaded(byte *start, byte *end) { /* i#852: include Extensions as they are really part of the clients and * aren't like other private libs. * XXX: we only avoid having the client libs on here b/c they're specified via * full path and don't go through the loaders' locate routines. * Not a big deal if they do end up on here: if they always did we could * remove the linear walk in is_in_client_lib(). 
*/ /* called prior to instrument_init() */ init_client_aux_libs(); vmvector_add(client_aux_libs, start, end, NULL /*not an auxlib*/); } void instrument_client_lib_unloaded(byte *start, byte *end) { /* called after instrument_exit() */ if (client_aux_libs != NULL) vmvector_remove(client_aux_libs, start, end); } /************************************************** * CLIENT AUXILIARY LIBRARIES */ DR_API dr_auxlib_handle_t dr_load_aux_library(const char *name, byte **lib_start /*OPTIONAL OUT*/, byte **lib_end /*OPTIONAL OUT*/) { byte *start, *end; dr_auxlib_handle_t lib = load_shared_library(name, true /*reachable*/); if (shared_library_bounds(lib, NULL, name, &start, &end)) { /* be sure to replace b/c i#852 now adds during load w/ empty data */ vmvector_add_replace(client_aux_libs, start, end, (void *)lib); if (lib_start != NULL) *lib_start = start; if (lib_end != NULL) *lib_end = end; all_memory_areas_lock(); update_all_memory_areas(start, end, /* XXX: see comment in instrument_init() * on walking the sections and what prot to use */ MEMPROT_READ, DR_MEMTYPE_IMAGE); all_memory_areas_unlock(); } else { unload_shared_library(lib); lib = NULL; } return lib; } DR_API dr_auxlib_routine_ptr_t dr_lookup_aux_library_routine(dr_auxlib_handle_t lib, const char *name) { if (lib == NULL) return NULL; return lookup_library_routine(lib, name); } DR_API bool dr_unload_aux_library(dr_auxlib_handle_t lib) { byte *start = NULL, *end = NULL; /* unfortunately on linux w/ dlopen we cannot find the bounds w/o * either the path or an address so we iterate. * once we have our private loader we shouldn't need this: * XXX i#157 */ vmvector_iterator_t vmvi; dr_auxlib_handle_t found = NULL; if (lib == NULL) return false; vmvector_iterator_start(client_aux_libs, &vmvi); while (vmvector_iterator_hasnext(&vmvi)) { found = (dr_auxlib_handle_t)vmvector_iterator_next(&vmvi, &start, &end); if (found == lib) break; } vmvector_iterator_stop(&vmvi); if (found == lib) { CLIENT_ASSERT(start != NULL && start < end, "logic error"); vmvector_remove(client_aux_libs, start, end); unload_shared_library(lib); all_memory_areas_lock(); update_all_memory_areas(start, end, MEMPROT_NONE, DR_MEMTYPE_FREE); all_memory_areas_unlock(); return true; } else { CLIENT_ASSERT(false, "invalid aux lib"); return false; } } # if defined(WINDOWS) && !defined(X64) /* XXX i#1633: these routines all have 64-bit handle and routine types for * handling win8's high ntdll64 in the future. For now the implementation * treats them as 32-bit types and we do not support win8+. */ DR_API dr_auxlib64_handle_t dr_load_aux_x64_library(const char *name) { HANDLE h; /* We use the x64 system loader. We assume that x64 state is fine being * interrupted at arbitrary points during x86 execution, and that there * is little risk of transparency violations. */ /* load_library_64() is racy. We don't expect anyone else to load * x64 libs, but another thread in this client could, so we * serialize here. */ d_r_mutex_lock(&client_aux_lib64_lock); /* XXX: if we switch to our private loader we'll need to add custom * search support to look in 64-bit system dir */ /* XXX: I'd add to the client_aux_libs vector, but w/ the system loader * loading this I don't know all the dependent libs it might load. * Not bothering for now. 
*/ h = load_library_64(name); d_r_mutex_unlock(&client_aux_lib64_lock); return (dr_auxlib64_handle_t)h; } DR_API dr_auxlib64_routine_ptr_t dr_lookup_aux_x64_library_routine(dr_auxlib64_handle_t lib, const char *name) { uint64 res = get_proc_address_64((uint64)lib, name); return (dr_auxlib64_routine_ptr_t)res; } DR_API bool dr_unload_aux_x64_library(dr_auxlib64_handle_t lib) { bool res; d_r_mutex_lock(&client_aux_lib64_lock); res = free_library_64((HANDLE)(uint)lib); /* uint cast to avoid cl warning */ d_r_mutex_unlock(&client_aux_lib64_lock); return res; } # endif /*************************************************************************** * LOCKS */ DR_API /* Initializes a mutex */ void * dr_mutex_create(void) { void *mutex = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, mutex_t, ACCT_CLIENT, UNPROTECTED); ASSIGN_INIT_LOCK_FREE(*((mutex_t *)mutex), dr_client_mutex); return mutex; } DR_API /* Deletes mutex */ void dr_mutex_destroy(void *mutex) { /* Delete mutex so locks_not_closed()==0 test in dynamo.c passes */ DELETE_LOCK(*((mutex_t *)mutex)); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (mutex_t *)mutex, mutex_t, ACCT_CLIENT, UNPROTECTED); } DR_API /* Locks mutex */ void dr_mutex_lock(void *mutex) { dcontext_t *dcontext = get_thread_private_dcontext(); /* set client_grab_mutex so that we know to set client_thread_safe_for_synch * around the actual wait for the lock */ if (IS_CLIENT_THREAD(dcontext)) { dcontext->client_data->client_grab_mutex = mutex; /* We do this on the outside so that we're conservative wrt races * in the direction of not killing the thread while it has a lock */ dcontext->client_data->mutex_count++; } d_r_mutex_lock((mutex_t *)mutex); if (IS_CLIENT_THREAD(dcontext)) dcontext->client_data->client_grab_mutex = NULL; } DR_API /* Unlocks mutex */ void dr_mutex_unlock(void *mutex) { dcontext_t *dcontext = get_thread_private_dcontext(); d_r_mutex_unlock((mutex_t *)mutex); /* We do this on the outside so that we're conservative wrt races * in the direction of not killing the thread while it has a lock */ if (IS_CLIENT_THREAD(dcontext)) { CLIENT_ASSERT(dcontext->client_data->mutex_count > 0, "internal client mutex nesting error"); dcontext->client_data->mutex_count--; } } DR_API /* Tries once to grab the lock, returns whether or not successful */ bool dr_mutex_trylock(void *mutex) { bool success = false; dcontext_t *dcontext = get_thread_private_dcontext(); /* set client_grab_mutex so that we know to set client_thread_safe_for_synch * around the actual wait for the lock */ if (IS_CLIENT_THREAD(dcontext)) { dcontext->client_data->client_grab_mutex = mutex; /* We do this on the outside so that we're conservative wrt races * in the direction of not killing the thread while it has a lock */ dcontext->client_data->mutex_count++; } success = d_r_mutex_trylock((mutex_t *)mutex); if (IS_CLIENT_THREAD(dcontext)) { if (!success) dcontext->client_data->mutex_count--; dcontext->client_data->client_grab_mutex = NULL; } return success; } DR_API bool dr_mutex_self_owns(void *mutex) { return IF_DEBUG_ELSE(OWN_MUTEX((mutex_t *)mutex), true); } DR_API bool dr_mutex_mark_as_app(void *mutex) { mutex_t *lock = (mutex_t *)mutex; d_r_mutex_mark_as_app(lock); return true; } DR_API void * dr_rwlock_create(void) { void *rwlock = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, read_write_lock_t, ACCT_CLIENT, UNPROTECTED); ASSIGN_INIT_READWRITE_LOCK_FREE(*((read_write_lock_t *)rwlock), dr_client_mutex); return rwlock; } DR_API void dr_rwlock_destroy(void *rwlock) { DELETE_READWRITE_LOCK(*((read_write_lock_t *)rwlock)); 
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (read_write_lock_t *)rwlock, read_write_lock_t, ACCT_CLIENT, UNPROTECTED); } DR_API void dr_rwlock_read_lock(void *rwlock) { d_r_read_lock((read_write_lock_t *)rwlock); } DR_API void dr_rwlock_read_unlock(void *rwlock) { d_r_read_unlock((read_write_lock_t *)rwlock); } DR_API void dr_rwlock_write_lock(void *rwlock) { d_r_write_lock((read_write_lock_t *)rwlock); } DR_API void dr_rwlock_write_unlock(void *rwlock) { d_r_write_unlock((read_write_lock_t *)rwlock); } DR_API bool dr_rwlock_write_trylock(void *rwlock) { return d_r_write_trylock((read_write_lock_t *)rwlock); } DR_API bool dr_rwlock_self_owns_write_lock(void *rwlock) { return self_owns_write_lock((read_write_lock_t *)rwlock); } DR_API bool dr_rwlock_mark_as_app(void *rwlock) { read_write_lock_t *lock = (read_write_lock_t *)rwlock; d_r_mutex_mark_as_app(&lock->lock); return true; } DR_API void * dr_recurlock_create(void) { void *reclock = (void *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, recursive_lock_t, ACCT_CLIENT, UNPROTECTED); ASSIGN_INIT_RECURSIVE_LOCK_FREE(*((recursive_lock_t *)reclock), dr_client_mutex); return reclock; } DR_API void dr_recurlock_destroy(void *reclock) { DELETE_RECURSIVE_LOCK(*((recursive_lock_t *)reclock)); HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (recursive_lock_t *)reclock, recursive_lock_t, ACCT_CLIENT, UNPROTECTED); } DR_API void dr_recurlock_lock(void *reclock) { acquire_recursive_lock((recursive_lock_t *)reclock); } DR_API void dr_app_recurlock_lock(void *reclock, dr_mcontext_t *mc) { CLIENT_ASSERT(mc->flags == DR_MC_ALL, "mcontext must be for DR_MC_ALL"); acquire_recursive_app_lock((recursive_lock_t *)reclock, dr_mcontext_as_priv_mcontext(mc)); } DR_API void dr_recurlock_unlock(void *reclock) { release_recursive_lock((recursive_lock_t *)reclock); } DR_API bool dr_recurlock_trylock(void *reclock) { return try_recursive_lock((recursive_lock_t *)reclock); } DR_API bool dr_recurlock_self_owns(void *reclock) { return self_owns_recursive_lock((recursive_lock_t *)reclock); } DR_API bool dr_recurlock_mark_as_app(void *reclock) { recursive_lock_t *lock = (recursive_lock_t *)reclock; d_r_mutex_mark_as_app(&lock->lock); return true; } DR_API void * dr_event_create(void) { return (void *)create_event(); } DR_API bool dr_event_destroy(void *event) { destroy_event((event_t)event); return true; } DR_API bool dr_event_wait(void *event) { dcontext_t *dcontext = get_thread_private_dcontext(); if (IS_CLIENT_THREAD(dcontext)) dcontext->client_data->client_thread_safe_for_synch = true; wait_for_event((event_t)event, 0); if (IS_CLIENT_THREAD(dcontext)) dcontext->client_data->client_thread_safe_for_synch = false; return true; } DR_API bool dr_event_signal(void *event) { signal_event((event_t)event); return true; } DR_API bool dr_event_reset(void *event) { reset_event((event_t)event); return true; } DR_API bool dr_mark_safe_to_suspend(void *drcontext, bool enter) { dcontext_t *dcontext = (dcontext_t *)drcontext; ASSERT_OWN_NO_LOCKS(); /* We need to return so we can't call check_wait_at_safe_spot(). * We don't set mcontext b/c no one should examine it. */ if (enter) set_synch_state(dcontext, THREAD_SYNCH_NO_LOCKS_NO_XFER); else set_synch_state(dcontext, THREAD_SYNCH_NONE); return true; } DR_API int dr_atomic_add32_return_sum(volatile int *x, int val) { return atomic_add_exchange_int(x, val); } /*************************************************************************** * MODULES */ DR_API /* Looks up the module data containing pc. Returns NULL if not found.
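 * A typical lookup sketch:
 *   module_data_t *mod = dr_lookup_module(pc);
 *   if (mod != NULL) {
 *       ... use mod->full_path, dr_module_preferred_name(mod), etc. ...
 *       dr_free_module_data(mod);
 *   }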
* Returned module_data_t must be freed with dr_free_module_data(). */ module_data_t * dr_lookup_module(byte *pc) { module_area_t *area; module_data_t *client_data; os_get_module_info_lock(); area = module_pc_lookup(pc); client_data = copy_module_area_to_module_data(area); os_get_module_info_unlock(); return client_data; } DR_API module_data_t * dr_get_main_module(void) { return dr_lookup_module(get_image_entry()); } DR_API /* Looks up the module with name matching name (ignoring case). Returns NULL if not * found. Returned module_data_t must be freed with dr_free_module_data(). */ module_data_t * dr_lookup_module_by_name(const char *name) { /* We have no quick way of doing this since our module list is indexed by pc. We * could use get_module_handle() but that's dangerous to call at arbitrary times, * so we just walk our full list here. */ module_iterator_t *mi = module_iterator_start(); CLIENT_ASSERT((name != NULL), "dr_lookup_module_by_name: null name"); while (module_iterator_hasnext(mi)) { module_area_t *area = module_iterator_next(mi); module_data_t *client_data; const char *modname = GET_MODULE_NAME(&area->names); if (modname != NULL && strcasecmp(modname, name) == 0) { client_data = copy_module_area_to_module_data(area); module_iterator_stop(mi); return client_data; } } module_iterator_stop(mi); return NULL; } typedef struct _client_mod_iterator_list_t { module_data_t *info; struct _client_mod_iterator_list_t *next; } client_mod_iterator_list_t; typedef struct { client_mod_iterator_list_t *current; client_mod_iterator_list_t *full_list; } client_mod_iterator_t; DR_API /* Initialize a new client module iterator. */ dr_module_iterator_t * dr_module_iterator_start(void) { client_mod_iterator_t *client_iterator = (client_mod_iterator_t *)HEAP_TYPE_ALLOC( GLOBAL_DCONTEXT, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED); module_iterator_t *dr_iterator = module_iterator_start(); memset(client_iterator, 0, sizeof(*client_iterator)); while (module_iterator_hasnext(dr_iterator)) { module_area_t *area = module_iterator_next(dr_iterator); client_mod_iterator_list_t *list = (client_mod_iterator_list_t *)HEAP_TYPE_ALLOC( GLOBAL_DCONTEXT, client_mod_iterator_list_t, ACCT_CLIENT, UNPROTECTED); ASSERT(area != NULL); list->info = copy_module_area_to_module_data(area); list->next = NULL; if (client_iterator->current == NULL) { client_iterator->current = list; client_iterator->full_list = client_iterator->current; } else { client_iterator->current->next = list; client_iterator->current = client_iterator->current->next; } } module_iterator_stop(dr_iterator); client_iterator->current = client_iterator->full_list; return (dr_module_iterator_t *)client_iterator; } DR_API /* Returns true if there is another loaded module in the iterator. */ bool dr_module_iterator_hasnext(dr_module_iterator_t *mi) { CLIENT_ASSERT((mi != NULL), "dr_module_iterator_hasnext: null iterator"); return ((client_mod_iterator_t *)mi)->current != NULL; } DR_API /* Retrieves the module_data_t for the next loaded module in the iterator. */ module_data_t * dr_module_iterator_next(dr_module_iterator_t *mi) { module_data_t *data; client_mod_iterator_t *ci = (client_mod_iterator_t *)mi; CLIENT_ASSERT((mi != NULL), "dr_module_iterator_next: null iterator"); CLIENT_ASSERT((ci->current != NULL), "dr_module_iterator_next: has no next, use " "dr_module_iterator_hasnext() first"); if (ci->current == NULL) return NULL; data = ci->current->info; ci->current = ci->current->next; return data; } DR_API /* Free the module iterator.
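 * A full traversal sketch (each module_data_t handed out by
 * dr_module_iterator_next() is the caller's to free):
 *   dr_module_iterator_t *mi = dr_module_iterator_start();
 *   while (dr_module_iterator_hasnext(mi)) {
 *       module_data_t *mod = dr_module_iterator_next(mi);
 *       ... inspect mod ...
 *       dr_free_module_data(mod);
 *   }
 *   dr_module_iterator_stop(mi);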
*/ void dr_module_iterator_stop(dr_module_iterator_t *mi) { client_mod_iterator_t *ci = (client_mod_iterator_t *)mi; CLIENT_ASSERT((mi != NULL), "dr_module_iterator_stop: null iterator"); /* free module_data_t's we didn't give to the client */ while (ci->current != NULL) { dr_free_module_data(ci->current->info); ci->current = ci->current->next; } ci->current = ci->full_list; while (ci->current != NULL) { client_mod_iterator_list_t *next = ci->current->next; HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci->current, client_mod_iterator_list_t, ACCT_CLIENT, UNPROTECTED); ci->current = next; } HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ci, client_mod_iterator_t, ACCT_CLIENT, UNPROTECTED); } DR_API /* Get the name dr uses for this module. */ const char * dr_module_preferred_name(const module_data_t *data) { if (data == NULL) return NULL; return GET_MODULE_NAME(&data->names); } # ifdef WINDOWS DR_API /* If pc is within a section of module lib returns true and (optionally) a copy of * the IMAGE_SECTION_HEADER in section_out. If pc is not within a section of the * module mod return false. */ bool dr_lookup_module_section(module_handle_t lib, byte *pc, IMAGE_SECTION_HEADER *section_out) { CLIENT_ASSERT((lib != NULL), "dr_lookup_module_section: null module_handle_t"); return module_pc_section_lookup((app_pc)lib, pc, section_out); } # endif /* i#805: Instead of exposing multiple instruction levels, we expose a way for * clients to turn off instrumentation. Then DR can avoid a full decode and we * can save some time on modules that are not interesting. * XXX: This breaks other clients and extensions, in particular drwrap, which * can miss call and return sites in the uninstrumented module. */ DR_API bool dr_module_set_should_instrument(module_handle_t handle, bool should_instrument) { module_area_t *ma; DEBUG_DECLARE(dcontext_t *dcontext = get_thread_private_dcontext()); IF_DEBUG(executable_areas_lock()); os_get_module_info_write_lock(); ma = module_pc_lookup((byte *)handle); if (ma != NULL) { /* This kind of obviates the need for handle, but it makes the API more * explicit. */ CLIENT_ASSERT(dcontext->client_data->no_delete_mod_data->handle == handle, "Do not call dr_module_set_should_instrument() outside " "of the module's own load event"); ASSERT(!executable_vm_area_executed_from(ma->start, ma->end)); if (should_instrument) { ma->flags &= ~MODULE_NULL_INSTRUMENT; } else { ma->flags |= MODULE_NULL_INSTRUMENT; } } os_get_module_info_write_unlock(); IF_DEBUG(executable_areas_unlock()); return (ma != NULL); } DR_API bool dr_module_should_instrument(module_handle_t handle) { bool should_instrument = true; module_area_t *ma; os_get_module_info_lock(); ma = module_pc_lookup((byte *)handle); CLIENT_ASSERT(ma != NULL, "invalid module handle"); if (ma != NULL) { should_instrument = !TEST(MODULE_NULL_INSTRUMENT, ma->flags); } os_get_module_info_unlock(); return should_instrument; } DR_API /* Returns the entry point of the function with the given name in the module * with the given handle. * We're not taking in module_data_t to make it simpler for the client * to iterate or lookup the module_data_t, store the single-field * handle, and then free the data right away: besides, module_data_t * is not an opaque type. 
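 * An illustrative lookup (the module and symbol names here are assumptions,
 * not part of this API):
 *   module_data_t *mod = dr_lookup_module_by_name("libc.so.6");
 *   if (mod != NULL) {
 *       generic_func_t f = dr_get_proc_address(mod->handle, "malloc");
 *       dr_free_module_data(mod);
 *   }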
*/ generic_func_t dr_get_proc_address(module_handle_t lib, const char *name) { # ifdef WINDOWS return get_proc_address_resolve_forward(lib, name); # else return d_r_get_proc_address(lib, name); # endif } DR_API bool dr_get_proc_address_ex(module_handle_t lib, const char *name, dr_export_info_t *info OUT, size_t info_len) { /* If we add new fields we'll check various values of info_len */ if (info == NULL || info_len < sizeof(*info)) return false; # ifdef WINDOWS info->address = get_proc_address_resolve_forward(lib, name); info->is_indirect_code = false; # else info->address = get_proc_address_ex(lib, name, &info->is_indirect_code); # endif return (info->address != NULL); } byte * dr_map_executable_file(const char *filename, dr_map_executable_flags_t flags, size_t *size OUT) { # ifdef MACOS /* XXX i#1285: implement private loader on Mac */ return NULL; # else modload_flags_t mflags = MODLOAD_NOT_PRIVLIB; if (TEST(DR_MAPEXE_SKIP_WRITABLE, flags)) mflags |= MODLOAD_SKIP_WRITABLE; if (filename == NULL) return NULL; return privload_map_and_relocate(filename, size, mflags); # endif } bool dr_unmap_executable_file(byte *base, size_t size) { return d_r_unmap_file(base, size); } DR_API /* Creates a new directory. Fails if the directory already exists * or if it can't be created. */ bool dr_create_dir(const char *fname) { return os_create_dir(fname, CREATE_DIR_REQUIRE_NEW); } DR_API bool dr_delete_dir(const char *fname) { return os_delete_dir(fname); } DR_API bool dr_get_current_directory(char *buf, size_t bufsz) { return os_get_current_dir(buf, bufsz); } DR_API /* Checks existence of a directory. */ bool dr_directory_exists(const char *fname) { return os_file_exists(fname, true); } DR_API /* Checks for the existence of a file. */ bool dr_file_exists(const char *fname) { return os_file_exists(fname, false); } DR_API /* Opens a file in the mode specified by mode_flags. * Returns INVALID_FILE if unsuccessful */ file_t dr_open_file(const char *fname, uint mode_flags) { uint flags = 0; if (TEST(DR_FILE_WRITE_REQUIRE_NEW, mode_flags)) { flags |= OS_OPEN_WRITE | OS_OPEN_REQUIRE_NEW; } if (TEST(DR_FILE_WRITE_APPEND, mode_flags)) { CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected"); flags |= OS_OPEN_WRITE | OS_OPEN_APPEND; } if (TEST(DR_FILE_WRITE_OVERWRITE, mode_flags)) { CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected"); flags |= OS_OPEN_WRITE; } if (TEST(DR_FILE_WRITE_ONLY, mode_flags)) { CLIENT_ASSERT((flags == 0), "dr_open_file: multiple write modes selected"); flags |= OS_OPEN_WRITE_ONLY; } if (TEST(DR_FILE_READ, mode_flags)) flags |= OS_OPEN_READ; CLIENT_ASSERT((flags != 0), "dr_open_file: no mode selected"); if (TEST(DR_FILE_ALLOW_LARGE, mode_flags)) flags |= OS_OPEN_ALLOW_LARGE; if (TEST(DR_FILE_CLOSE_ON_FORK, mode_flags)) flags |= OS_OPEN_CLOSE_ON_FORK; /* all client-opened files are protected */ return os_open_protected(fname, flags); } DR_API /* Closes file f */ void dr_close_file(file_t f) { /* all client-opened files are protected */ os_close_protected(f); } DR_API /* Renames the file src to dst. */ bool dr_rename_file(const char *src, const char *dst, bool replace) { return os_rename_file(src, dst, replace); } DR_API /* Deletes a file. */ bool dr_delete_file(const char *filename) { /* os_delete_mapped_file should be a superset of os_delete_file, so we use * it. */ return os_delete_mapped_file(filename); } DR_API /* Flushes any buffers for file f */ void dr_flush_file(file_t f) { os_flush(f); } DR_API /* Writes count bytes from buf to f. 
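 * A hedged usage sketch pairing this with dr_open_file() above (the path,
 * buf, and len here are illustrative):
 *   file_t f = dr_open_file("/tmp/client.log", DR_FILE_WRITE_OVERWRITE);
 *   if (f != INVALID_FILE) {
 *       dr_write_file(f, buf, len);
 *       dr_close_file(f);
 *   }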
* Returns the actual number written. */ ssize_t dr_write_file(file_t f, const void *buf, size_t count) { # ifdef WINDOWS if ((f == STDOUT || f == STDERR) && print_to_console) return dr_write_to_console_varg(f == STDOUT, "%.*s", count, buf); else # endif return os_write(f, buf, count); } DR_API /* Reads up to count bytes from f into buf. * Returns the actual number read. */ ssize_t dr_read_file(file_t f, void *buf, size_t count) { return os_read(f, buf, count); } DR_API /* sets the current file position for file f to offset bytes from the specified origin * returns true if successful */ bool dr_file_seek(file_t f, int64 offset, int origin) { CLIENT_ASSERT(origin == DR_SEEK_SET || origin == DR_SEEK_CUR || origin == DR_SEEK_END, "dr_file_seek: invalid origin value"); return os_seek(f, offset, origin); } DR_API /* gets the current file position for file f in bytes from start of file */ int64 dr_file_tell(file_t f) { return os_tell(f); } DR_API file_t dr_dup_file_handle(file_t f) { # ifdef UNIX /* returns -1 on failure == INVALID_FILE */ return dup_syscall(f); # else HANDLE ht = INVALID_HANDLE_VALUE; NTSTATUS res = duplicate_handle(NT_CURRENT_PROCESS, f, NT_CURRENT_PROCESS, &ht, SYNCHRONIZE, 0, DUPLICATE_SAME_ACCESS | DUPLICATE_SAME_ATTRIBUTES); if (!NT_SUCCESS(res)) return INVALID_FILE; else return ht; # endif } DR_API bool dr_file_size(file_t fd, OUT uint64 *size) { return os_get_file_size_by_handle(fd, size); } DR_API void * dr_map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot, uint flags) { return (void *)d_r_map_file( f, size, offs, addr, prot, (TEST(DR_MAP_PRIVATE, flags) ? MAP_FILE_COPY_ON_WRITE : 0) | IF_WINDOWS((TEST(DR_MAP_IMAGE, flags) ? MAP_FILE_IMAGE : 0) |) IF_UNIX((TEST(DR_MAP_FIXED, flags) ? MAP_FILE_FIXED : 0) |)( TEST(DR_MAP_CACHE_REACHABLE, flags) ? MAP_FILE_REACHABLE : 0)); } DR_API bool dr_unmap_file(void *map, size_t size) { dr_mem_info_t info; CLIENT_ASSERT(ALIGNED(map, PAGE_SIZE), "dr_unmap_file: map is not page aligned"); if (!dr_query_memory_ex(map, &info) /* fail to query */ || info.type == DR_MEMTYPE_FREE /* not mapped file */) { CLIENT_ASSERT(false, "dr_unmap_file: incorrect file map"); return false; } # ifdef WINDOWS /* On Windows, the whole file will be unmapped instead, so we adjust * the bound to make sure vm_areas are updated correctly. */ map = info.base_pc; if (info.type == DR_MEMTYPE_IMAGE) { size = get_allocation_size(map, NULL); } else size = info.size; # endif return d_r_unmap_file((byte *)map, size); } DR_API void dr_log(void *drcontext, uint mask, uint level, const char *fmt, ...) { # ifdef DEBUG dcontext_t *dcontext = (dcontext_t *)drcontext; va_list ap; if (d_r_stats != NULL && ((d_r_stats->logmask & mask) == 0 || d_r_stats->loglevel < level)) return; va_start(ap, fmt); if (dcontext != NULL) do_file_write(dcontext->logfile, fmt, ap); else do_file_write(main_logfile, fmt, ap); va_end(ap); # else return; /* no logging if not debug */ # endif } DR_API /* Returns the log file for the drcontext thread. * If drcontext is NULL, returns the main log file. */ file_t dr_get_logfile(void *drcontext) { # ifdef DEBUG dcontext_t *dcontext = (dcontext_t *)drcontext; if (dcontext != NULL) return dcontext->logfile; else return main_logfile; # else return INVALID_FILE; # endif } DR_API /* Returns true iff the -stderr_mask runtime option is non-zero, indicating * that the user wants notification messages printed to stderr. 
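 * Clients typically gate their own diagnostics on this, e.g.:
 *   if (dr_is_notify_on())
 *       dr_fprintf(STDERR, "my_client: attached\n");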
*/ bool dr_is_notify_on(void) { return (dynamo_options.stderr_mask != 0); } # ifdef WINDOWS DR_API file_t dr_get_stdout_file(void) { return get_stdout_handle(); } DR_API file_t dr_get_stderr_file(void) { return get_stderr_handle(); } DR_API file_t dr_get_stdin_file(void) { return get_stdin_handle(); } # endif # ifdef PROGRAM_SHEPHERDING DR_API void dr_write_forensics_report(void *dcontext, file_t file, dr_security_violation_type_t violation, dr_security_violation_action_t action, const char *violation_name) { security_violation_t sec_violation; action_type_t sec_action; switch (violation) { case DR_RCO_STACK_VIOLATION: sec_violation = STACK_EXECUTION_VIOLATION; break; case DR_RCO_HEAP_VIOLATION: sec_violation = HEAP_EXECUTION_VIOLATION; break; case DR_RCT_RETURN_VIOLATION: sec_violation = RETURN_TARGET_VIOLATION; break; case DR_RCT_INDIRECT_CALL_VIOLATION: sec_violation = INDIRECT_CALL_RCT_VIOLATION; break; case DR_RCT_INDIRECT_JUMP_VIOLATION: sec_violation = INDIRECT_JUMP_RCT_VIOLATION; break; default: CLIENT_ASSERT(false, "dr_write_forensics_report does not support " "DR_UNKNOWN_VIOLATION or invalid violation types"); return; } switch (action) { case DR_VIOLATION_ACTION_KILL_PROCESS: sec_action = ACTION_TERMINATE_PROCESS; break; case DR_VIOLATION_ACTION_CONTINUE: case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT: sec_action = ACTION_CONTINUE; break; case DR_VIOLATION_ACTION_KILL_THREAD: sec_action = ACTION_TERMINATE_THREAD; break; case DR_VIOLATION_ACTION_THROW_EXCEPTION: sec_action = ACTION_THROW_EXCEPTION; break; default: CLIENT_ASSERT(false, "dr_write_forensics_report invalid action selection"); return; } /* FIXME - could use a better message. */ append_diagnostics(file, action_message[sec_action], violation_name, sec_violation); } # endif /* PROGRAM_SHEPHERDING */ # ifdef WINDOWS DR_API void dr_messagebox(const char *fmt, ...) { dcontext_t *dcontext = NULL; if (!standalone_library) dcontext = get_thread_private_dcontext(); char msg[MAX_LOG_LENGTH]; wchar_t wmsg[MAX_LOG_LENGTH]; va_list ap; va_start(ap, fmt); vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap); NULL_TERMINATE_BUFFER(msg); snwprintf(wmsg, BUFFER_SIZE_ELEMENTS(wmsg), L"%S", msg); NULL_TERMINATE_BUFFER(wmsg); if (!standalone_library && IS_CLIENT_THREAD(dcontext)) dcontext->client_data->client_thread_safe_for_synch = true; nt_messagebox(wmsg, debugbox_get_title()); if (!standalone_library && IS_CLIENT_THREAD(dcontext)) dcontext->client_data->client_thread_safe_for_synch = false; va_end(ap); } static ssize_t dr_write_to_console(bool to_stdout, const char *fmt, va_list ap) { bool res = true; char msg[MAX_LOG_LENGTH]; uint written = 0; int len; HANDLE std; CLIENT_ASSERT(dr_using_console(), "internal logic error"); ASSERT(priv_kernel32 != NULL && kernel32_WriteFile != NULL); /* kernel32!GetStdHandle(STD_OUTPUT_HANDLE) == our PEB-based get_stdout_handle */ std = (to_stdout ? get_stdout_handle() : get_stderr_handle()); if (std == INVALID_HANDLE_VALUE) return false; len = vsnprintf(msg, BUFFER_SIZE_ELEMENTS(msg), fmt, ap); /* Let user know if message was truncated */ if (len < 0 || len == BUFFER_SIZE_ELEMENTS(msg)) res = false; NULL_TERMINATE_BUFFER(msg); /* Make this routine work in all kinds of windows by going through * kernel32!WriteFile, which will call WriteConsole for us. */ res = res && kernel32_WriteFile(std, msg, (DWORD)strlen(msg), (LPDWORD)&written, NULL); return (res ? written : 0); } static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...) 
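/* Varargs convenience wrapper over dr_write_to_console(); used by
 * dr_write_file() above when print_to_console is set.
 */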
{ va_list ap; ssize_t res; va_start(ap, fmt); res = dr_write_to_console(to_stdout, fmt, ap); va_end(ap); return res; } DR_API bool dr_using_console(void) { bool res; if (get_os_version() >= WINDOWS_VERSION_8) { FILE_FS_DEVICE_INFORMATION device_info; HANDLE herr = get_stderr_handle(); /* The handle is invalid iff it's a gui app and the parent is a console */ if (herr == INVALID_HANDLE_VALUE) { module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll"); if (privload_attach_parent_console(app_kernel32->start) == false) { dr_free_module_data(app_kernel32); return false; } dr_free_module_data(app_kernel32); herr = get_stderr_handle(); } if (nt_query_volume_info(herr, &device_info, sizeof(device_info), FileFsDeviceInformation) == STATUS_SUCCESS) { if (device_info.DeviceType == FILE_DEVICE_CONSOLE) return true; } return false; } /* We detect cmd window using what kernel32!WriteFile uses: a handle * having certain bits set. */ res = (((ptr_int_t)get_stderr_handle() & 0x10000003) == 0x3); CLIENT_ASSERT(!res || get_os_version() < WINDOWS_VERSION_8, "Please report this: Windows 8 does have old-style consoles!"); return res; } DR_API bool dr_enable_console_printing(void) { bool success = false; /* b/c private loader sets cxt sw code up front based on whether have windows * priv libs or not, this can only be called during client init() */ if (dynamo_initialized) { CLIENT_ASSERT(false, "dr_enable_console_printing() must be called during init"); return false; } /* Direct writes to std handles work on win8+ (xref i#911) but we don't need * a separate check as the handle is detected as a non-console handle. */ if (!dr_using_console()) return true; if (!INTERNAL_OPTION(private_loader)) return false; if (!print_to_console) { if (priv_kernel32 == NULL) { /* Not using load_shared_library() b/c it won't search paths * for us. XXX: should add os-shared interface for * locate-and-load. */ priv_kernel32 = (shlib_handle_t)locate_and_load_private_library( "kernel32.dll", false /*!reachable*/); } if (priv_kernel32 != NULL && kernel32_WriteFile == NULL) { module_data_t *app_kernel32 = dr_lookup_module_by_name("kernel32.dll"); kernel32_WriteFile = (kernel32_WriteFile_t)lookup_library_routine(priv_kernel32, "WriteFile"); /* There is some problem in loading 32 bit kernel32.dll * when 64 bit kernel32.dll is already loaded. If kernel32 is * not loaded we can't call privload_console_share because it * assumes kernel32 is loaded */ if (app_kernel32 == NULL) { success = false; } else { success = privload_console_share(priv_kernel32, app_kernel32->start); dr_free_module_data(app_kernel32); } } /* We go ahead and cache whether dr_using_console(). If app really * changes its console, client could call this routine again * as a workaround. Seems unlikely: better to have better perf. */ print_to_console = (priv_kernel32 != NULL && kernel32_WriteFile != NULL && success); } return print_to_console; } # endif /* WINDOWS */ DR_API void dr_printf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); # ifdef WINDOWS if (print_to_console) dr_write_to_console(true /*stdout*/, fmt, ap); else # endif do_file_write(STDOUT, fmt, ap); va_end(ap); } DR_API ssize_t dr_vfprintf(file_t f, const char *fmt, va_list ap) { ssize_t written; # ifdef WINDOWS if ((f == STDOUT || f == STDERR) && print_to_console) { written = dr_write_to_console(f == STDOUT, fmt, ap); if (written <= 0) written = -1; } else # endif written = do_file_write(f, fmt, ap); return written; } DR_API ssize_t dr_fprintf(file_t f, const char *fmt, ...) 
{
    va_list ap;
    ssize_t res;
    va_start(ap, fmt);
    res = dr_vfprintf(f, fmt, ap);
    va_end(ap);
    return res;
}

DR_API
int
dr_snprintf(char *buf, size_t max, const char *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    /* PR 219380: we use d_r_vsnprintf instead of ntdll._vsnprintf b/c the
     * latter does not support floating point.
     * Plus, d_r_vsnprintf returns -1 for > max chars (matching Windows
     * behavior, which the Linux libc version does not do).
     */
    res = d_r_vsnprintf(buf, max, fmt, ap);
    va_end(ap);
    return res;
}

DR_API
int
dr_vsnprintf(char *buf, size_t max, const char *fmt, va_list ap)
{
    return d_r_vsnprintf(buf, max, fmt, ap);
}

DR_API
int
dr_snwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    res = d_r_vsnprintf_wide(buf, max, fmt, ap);
    va_end(ap);
    return res;
}

DR_API
int
dr_vsnwprintf(wchar_t *buf, size_t max, const wchar_t *fmt, va_list ap)
{
    return d_r_vsnprintf_wide(buf, max, fmt, ap);
}

DR_API
int
dr_sscanf(const char *str, const char *fmt, ...)
{
    int res;
    va_list ap;
    va_start(ap, fmt);
    res = d_r_vsscanf(str, fmt, ap);
    va_end(ap);
    return res;
}

DR_API
const char *
dr_get_token(const char *str, char *buf, size_t buflen)
{
    /* We don't indicate whether any truncation happened.  The
     * reasoning is that this is meant to be used on a string of known
     * size ahead of time, so the max size for any one token is known.
     */
    const char *pos = str;
    CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(buflen), "buflen too large");
    if (d_r_parse_word(str, &pos, buf, (uint)buflen) == NULL)
        return NULL;
    else
        return pos;
}

DR_API
void
dr_print_instr(void *drcontext, file_t f, instr_t *instr, const char *msg)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_print_instr: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
                  "dr_print_instr: drcontext is invalid");
    dr_fprintf(f, "%s " PFX " ", msg, instr_get_translation(instr));
    instr_disassemble(dcontext, instr, f);
    dr_fprintf(f, "\n");
}

DR_API
void
dr_print_opnd(void *drcontext, file_t f, opnd_t opnd, const char *msg)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_print_opnd: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT || standalone_library,
                  "dr_print_opnd: drcontext is invalid");
    dr_fprintf(f, "%s ", msg);
    opnd_disassemble(dcontext, opnd, f);
    dr_fprintf(f, "\n");
}

/***************************************************************************
 * Thread support
 */

DR_API
/* Returns the DR context of the current thread */
void *
dr_get_current_drcontext(void)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    return (void *)dcontext;
}

DR_API
thread_id_t
dr_get_thread_id(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "dr_get_thread_id: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_get_thread_id: drcontext is invalid");
    return dcontext->owning_thread;
}

# ifdef WINDOWS
/* Added for DrMem i#1254 */
DR_API
HANDLE
dr_get_dr_thread_handle(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_get_dr_thread_handle: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_dr_thread_handle: drcontext is invalid");
    return dcontext->thread_record->handle;
}
# endif

DR_API
void *
dr_get_tls_field(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
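    /* The TLS field read here is the single client-owned pointer kept in this
     * thread's dcontext (set via dr_set_tls_field() below); DR stores it but
     * never interprets it.
     */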
CLIENT_ASSERT(drcontext != NULL, "dr_get_tls_field: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_get_tls_field: drcontext is invalid"); return dcontext->client_data->user_field; } DR_API void dr_set_tls_field(void *drcontext, void *value) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_set_tls_field: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_set_tls_field: drcontext is invalid"); dcontext->client_data->user_field = value; } DR_API void * dr_get_dr_segment_base(IN reg_id_t seg) { # ifdef AARCHXX if (seg == dr_reg_stolen) return os_get_dr_tls_base(get_thread_private_dcontext()); else return NULL; # else return get_segment_base(seg); # endif } DR_API bool dr_raw_tls_calloc(OUT reg_id_t *tls_register, OUT uint *offset, IN uint num_slots, IN uint alignment) { CLIENT_ASSERT(tls_register != NULL, "dr_raw_tls_calloc: tls_register cannot be NULL"); CLIENT_ASSERT(offset != NULL, "dr_raw_tls_calloc: offset cannot be NULL"); *tls_register = IF_X86_ELSE(SEG_TLS, dr_reg_stolen); if (num_slots == 0) return true; return os_tls_calloc(offset, num_slots, alignment); } DR_API bool dr_raw_tls_cfree(uint offset, uint num_slots) { if (num_slots == 0) return true; return os_tls_cfree(offset, num_slots); } DR_API opnd_t dr_raw_tls_opnd(void *drcontext, reg_id_t tls_register, uint tls_offs) { CLIENT_ASSERT(drcontext != NULL, "dr_raw_tls_opnd: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_raw_tls_opnd: drcontext is invalid"); IF_X86_ELSE( { return opnd_create_far_base_disp_ex(tls_register, DR_REG_NULL, DR_REG_NULL, 0, tls_offs, OPSZ_PTR, /* modern processors don't want addr16 * prefixes */ false, true, false); }, { return OPND_CREATE_MEMPTR(tls_register, tls_offs); }); } DR_API void dr_insert_read_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t tls_register, uint tls_offs, reg_id_t reg) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_insert_read_raw_tls: drcontext cannot be NULL"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "must use a pointer-sized general-purpose register"); IF_X86_ELSE( { MINSERT( ilist, where, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(reg), dr_raw_tls_opnd(drcontext, tls_register, tls_offs))); }, { MINSERT( ilist, where, XINST_CREATE_load(dcontext, opnd_create_reg(reg), dr_raw_tls_opnd(drcontext, tls_register, tls_offs))); }); } DR_API void dr_insert_write_raw_tls(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t tls_register, uint tls_offs, reg_id_t reg) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_insert_write_raw_tls: drcontext cannot be NULL"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "must use a pointer-sized general-purpose register"); IF_X86_ELSE( { MINSERT(ilist, where, INSTR_CREATE_mov_st( dcontext, dr_raw_tls_opnd(drcontext, tls_register, tls_offs), opnd_create_reg(reg))); }, { MINSERT(ilist, where, XINST_CREATE_store(dcontext, dr_raw_tls_opnd(drcontext, tls_register, tls_offs), opnd_create_reg(reg))); }); } DR_API /* Current thread gives up its time quantum. 
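 * While yielding, the thread is marked safe-to-suspend so that a synch-all
 * initiated by another thread is not blocked by this one.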
 */
void
dr_thread_yield(void)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = true;
    os_thread_yield();
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = false;
}

DR_API
/* Current thread sleeps for time_ms milliseconds. */
void
dr_sleep(int time_ms)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = true;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = true;
    os_thread_sleep(time_ms);
    if (IS_CLIENT_THREAD(dcontext))
        dcontext->client_data->client_thread_safe_for_synch = false;
    else
        dcontext->client_data->at_safe_to_terminate_syscall = false;
}

# ifdef CLIENT_SIDELINE
DR_API
bool
dr_client_thread_set_suspendable(bool suspendable)
{
    /* see notes in synch_with_all_threads() */
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (!IS_CLIENT_THREAD(dcontext))
        return false;
    dcontext->client_data->suspendable = suspendable;
    return true;
}
# endif

DR_API
bool
dr_suspend_all_other_threads_ex(OUT void ***drcontexts, OUT uint *num_suspended,
                                OUT uint *num_unsuspended, dr_suspend_flags_t flags)
{
    uint out_suspended = 0, out_unsuspended = 0;
    thread_record_t **threads;
    int num_threads;
    dcontext_t *my_dcontext = get_thread_private_dcontext();
    int i;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(OWN_NO_LOCKS(my_dcontext),
                  "dr_suspend_all_other_threads cannot be called while holding a lock");
    CLIENT_ASSERT(drcontexts != NULL && num_suspended != NULL,
                  "dr_suspend_all_other_threads invalid params");
    LOG(GLOBAL, LOG_FRAGMENT, 2,
        "\ndr_suspend_all_other_threads: thread " TIDFMT " suspending all threads\n",
        d_r_get_thread_id());

    /* suspend all DR-controlled threads at safe locations */
    if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER,
                                &threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER,
                                /* if we fail to suspend a thread (e.g., for
                                 * privilege reasons), ignore and continue */
                                THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) {
        LOG(GLOBAL, LOG_FRAGMENT, 2,
            "\ndr_suspend_all_other_threads: failed to suspend every thread\n");
        /* some threads may have been successfully suspended so we must return
         * their info so they'll be resumed.  I believe there is thus no
         * scenario under which we return false.
         */
    }
    /* now we own the thread_initexit_lock */
    CLIENT_ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock),
                  "internal locking error");

    /* To avoid two passes we allocate the array now.  It may be larger than
     * necessary if we had suspend failures but that's ok.
     * We hide the threads num and array in extra slots.
     */
    *drcontexts = (void **)global_heap_alloc(
        (num_threads + 2) * sizeof(dcontext_t *) HEAPACCT(ACCT_THREAD_MGT));
    for (i = 0; i < num_threads; i++) {
        dcontext_t *dcontext = threads[i]->dcontext;
        if (dcontext != NULL) { /* include my_dcontext here */
            if (dcontext != my_dcontext) {
                /* must translate BEFORE freeing any memory!
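                 * (i.e., any translation of a suspended thread's context must
                 * happen while its thread-local state is still allocated)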
                 */
                if (!thread_synch_successful(threads[i])) {
                    out_unsuspended++;
                } else if (is_thread_currently_native(threads[i]) &&
                           !TEST(DR_SUSPEND_NATIVE, flags)) {
                    out_unsuspended++;
                } else if (thread_synch_state_no_xfer(dcontext)) {
                    /* FIXME: for all other synchall callers, the app
                     * context should be sitting in their mcontext, even
                     * though we can't safely get their native context and
                     * translate it.
                     */
                    (*drcontexts)[out_suspended] = (void *)dcontext;
                    out_suspended++;
                    CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
                                  "internal inconsistency in where mcontext is");
                    /* officially get_mcontext() doesn't always set pc: we do anyway */
                    get_mcontext(dcontext)->pc = dcontext->next_tag;
                    dcontext->client_data->mcontext_in_dcontext = true;
                } else {
                    (*drcontexts)[out_suspended] = (void *)dcontext;
                    out_suspended++;
                    /* It's not safe to clobber the thread's mcontext with
                     * its own translation b/c for shared_syscall we store
                     * the continuation pc in the esi slot.
                     * We could translate here into heap-allocated memory,
                     * but some clients may just want to stop
                     * the world but not examine the threads, so we lazily
                     * translate in dr_get_mcontext().
                     */
                    CLIENT_ASSERT(!dcontext->client_data->suspended,
                                  "inconsistent usage of dr_suspend_all_other_threads");
                    CLIENT_ASSERT(dcontext->client_data->cur_mc == NULL,
                                  "inconsistent usage of dr_suspend_all_other_threads");
                    dcontext->client_data->suspended = true;
                }
            }
        }
    }
    /* Hide the two extra vars we need the client to pass back to us */
    (*drcontexts)[out_suspended] = (void *)threads;
    (*drcontexts)[out_suspended + 1] = (void *)(ptr_uint_t)num_threads;
    *num_suspended = out_suspended;
    if (num_unsuspended != NULL)
        *num_unsuspended = out_unsuspended;
    return true;
}

DR_API
bool
dr_suspend_all_other_threads(OUT void ***drcontexts, OUT uint *num_suspended,
                             OUT uint *num_unsuspended)
{
    return dr_suspend_all_other_threads_ex(drcontexts, num_suspended, num_unsuspended,
                                           0);
}

bool
dr_resume_all_other_threads(IN void **drcontexts, IN uint num_suspended)
{
    thread_record_t **threads;
    int num_threads;
    uint i;
    CLIENT_ASSERT(drcontexts != NULL, "dr_resume_all_other_threads invalid params");
    LOG(GLOBAL, LOG_FRAGMENT, 2, "dr_resume_all_other_threads\n");
    threads = (thread_record_t **)drcontexts[num_suspended];
    num_threads = (int)(ptr_int_t)drcontexts[num_suspended + 1];
    for (i = 0; i < num_suspended; i++) {
        dcontext_t *dcontext = (dcontext_t *)drcontexts[i];
        if (dcontext->client_data->cur_mc != NULL) {
            /* clear any cached mc from dr_get_mcontext_priv() */
            heap_free(dcontext, dcontext->client_data->cur_mc,
                      sizeof(*dcontext->client_data->cur_mc) HEAPACCT(ACCT_CLIENT));
            dcontext->client_data->cur_mc = NULL;
        }
        dcontext->client_data->suspended = false;
    }
    global_heap_free(drcontexts,
                     (num_threads + 2) * sizeof(dcontext_t *) HEAPACCT(ACCT_THREAD_MGT));
    end_synch_with_all_threads(threads, num_threads, true /*resume*/);
    return true;
}

DR_API
bool
dr_is_thread_native(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    return is_thread_currently_native(dcontext->thread_record);
}

DR_API
bool
dr_retakeover_suspended_native_thread(void *drcontext)
{
    bool res;
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    /* XXX: I don't quite see why I need to pop these 2 when I'm doing
     * what a regular retakeover would do
     */
    KSTOP_NOT_MATCHING_DC(dcontext, fcache_default);
    KSTOP_NOT_MATCHING_DC(dcontext, dispatch_num_exits);
    res = os_thread_take_over_suspended_native(dcontext);
    return res;
}

# ifdef UNIX
DR_API
bool
dr_set_itimer(int which, uint millisec,
              void (*func)(void *drcontext, dr_mcontext_t *mcontext))
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    if (func == NULL && millisec != 0)
        return false;
    return set_itimer_callback(dcontext, which, millisec, NULL,
                               (void (*)(dcontext_t *, dr_mcontext_t *))func);
}

DR_API
uint
dr_get_itimer(int which)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    return get_itimer_frequency(dcontext, which);
}
# endif /* UNIX */

DR_API
void
dr_track_where_am_i(void)
{
    track_where_am_i = true;
}

bool
should_track_where_am_i(void)
{
    return track_where_am_i || DYNAMO_OPTION(profile_pcs);
}

DR_API
bool
dr_is_tracking_where_am_i(void)
{
    return should_track_where_am_i();
}

DR_API
dr_where_am_i_t
dr_where_am_i(void *drcontext, app_pc pc, OUT void **tag_out)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL, "invalid param");
    void *tag = NULL;
    dr_where_am_i_t whereami = dcontext->whereami;
    /* Further refine if pc is in the cache. */
    if (whereami == DR_WHERE_FCACHE) {
        fragment_t *fragment;
        whereami = fcache_refine_whereami(dcontext, whereami, pc, &fragment);
        if (fragment != NULL)
            tag = fragment->tag;
    }
    if (tag_out != NULL)
        *tag_out = tag;
    return whereami;
}

#endif /* CLIENT_INTERFACE */

DR_API
void
instrlist_meta_fault_preinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_preinsert(ilist, where, inst);
}

DR_API
void
instrlist_meta_fault_postinsert(instrlist_t *ilist, instr_t *where, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_postinsert(ilist, where, inst);
}

DR_API
void
instrlist_meta_fault_append(instrlist_t *ilist, instr_t *inst)
{
    instr_set_meta_may_fault(inst, true);
    instrlist_append(ilist, inst);
}

static void
convert_va_list_to_opnd(dcontext_t *dcontext, opnd_t **args, uint num_args, va_list ap)
{
    uint i;
    ASSERT(num_args > 0);
    /* allocate at least one argument opnd */
    /* we don't check for GLOBAL_DCONTEXT since DR internally calls this */
    *args = HEAP_ARRAY_ALLOC(dcontext, opnd_t, num_args, ACCT_CLEANCALL, UNPROTECTED);
    for (i = 0; i < num_args; i++) {
        (*args)[i] = va_arg(ap, opnd_t);
        CLIENT_ASSERT(opnd_is_valid((*args)[i]),
                      "Call argument: bad operand. Did you create a valid opnd_t?");
    }
}

static void
free_va_opnd_list(dcontext_t *dcontext, uint num_args, opnd_t *args)
{
    if (num_args != 0) {
        HEAP_ARRAY_FREE(dcontext, args, opnd_t, num_args, ACCT_CLEANCALL, UNPROTECTED);
    }
}

/* dr_insert_* are used by general DR */

/* Inserts a complete call to callee with the passed-in arguments */
void
dr_insert_call(void *drcontext, instrlist_t *ilist, instr_t *where, void *callee,
               uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    opnd_t *args = NULL;
    instr_t *label = INSTR_CREATE_label(drcontext);
    dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist);
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call: drcontext cannot be NULL");
    instrlist_set_auto_predicate(ilist, DR_PRED_NONE);
#ifdef ARM
    if (instr_predicate_is_cond(auto_pred)) {
        /* auto_predicate is set, though we handle the clean call with a cbr
         * because we require inserting instrumentation which modifies cpsr.
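         * The inverted conditional branch inserted here skips the call
         * sequence when the original predicate is false, emulating predicated
         * execution of the call sequence, which itself must run unpredicated
         * because it clobbers cpsr.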
         */
        MINSERT(ilist, where,
                XINST_CREATE_jump_cond(drcontext, instr_invert_predicate(auto_pred),
                                       opnd_create_instr(label)));
    }
#endif
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(dcontext, &args, num_args, ap);
        va_end(ap);
    }
    insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS, vmcode_get_start(),
                           callee, num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
    MINSERT(ilist, where, label);
    instrlist_set_auto_predicate(ilist, auto_pred);
}

bool
dr_insert_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where, byte *encode_pc,
                  void *callee, uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    opnd_t *args = NULL;
    bool direct;
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_ex: drcontext cannot be NULL");
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(drcontext, &args, num_args, ap);
        va_end(ap);
    }
    direct = insert_meta_call_vargs(dcontext, ilist, where, META_CALL_RETURNS, encode_pc,
                                    callee, num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
    return direct;
}

/* Not exported.  Currently used for ARM to avoid storing to %lr. */
void
dr_insert_call_noreturn(void *drcontext, instrlist_t *ilist, instr_t *where, void *callee,
                        uint num_args, ...)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    opnd_t *args = NULL;
    va_list ap;
    CLIENT_ASSERT(drcontext != NULL, "dr_insert_call_noreturn: drcontext cannot be NULL");
    CLIENT_ASSERT(instrlist_get_auto_predicate(ilist) == DR_PRED_NONE,
                  "Does not support auto-predication");
    if (num_args != 0) {
        va_start(ap, num_args);
        convert_va_list_to_opnd(dcontext, &args, num_args, ap);
        va_end(ap);
    }
    insert_meta_call_vargs(dcontext, ilist, where, 0, vmcode_get_start(), callee,
                           num_args, args);
    if (num_args != 0)
        free_va_opnd_list(dcontext, num_args, args);
}

/* Internal utility routine for inserting context save for a clean call.
 * Returns the size of the data stored on the DR stack
 * (in case the caller needs to align the stack pointer).
 * XSP and XAX are modified by this call.
 */
static uint
prepare_for_call_ex(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
                    instr_t *where, byte *encode_pc)
{
    instr_t *in;
    uint dstack_offs;
    in = (where == NULL) ? instrlist_last(ilist) : instr_get_prev(where);
    dstack_offs = prepare_for_clean_call(dcontext, cci, ilist, where, encode_pc);
    /* now go through and mark inserted instrs as meta */
    if (in == NULL)
        in = instrlist_first(ilist);
    else
        in = instr_get_next(in);
    while (in != where) {
        instr_set_meta(in);
        in = instr_get_next(in);
    }
    return dstack_offs;
}

/* Internal utility routine for inserting context restore for a clean call. */
static void
cleanup_after_call_ex(dcontext_t *dcontext, clean_call_info_t *cci, instrlist_t *ilist,
                      instr_t *where, uint sizeof_param_area, byte *encode_pc)
{
    instr_t *in;
    in = (where == NULL) ?
instrlist_last(ilist) : instr_get_prev(where); if (sizeof_param_area > 0) { /* clean up the parameter area */ CLIENT_ASSERT(sizeof_param_area <= 127, "cleanup_after_call_ex: sizeof_param_area must be <= 127"); /* mark it meta down below */ instrlist_preinsert(ilist, where, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_INT8(sizeof_param_area))); } cleanup_after_clean_call(dcontext, cci, ilist, where, encode_pc); /* now go through and mark inserted instrs as meta */ if (in == NULL) in = instrlist_first(ilist); else in = instr_get_next(in); while (in != where) { instr_set_meta(in); in = instr_get_next(in); } } /* Inserts a complete call to callee with the passed-in arguments, wrapped * by an app save and restore. * * If "save_flags" includes DR_CLEANCALL_SAVE_FLOAT, saves the fp/mmx/sse state. * * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via * dr_prepare_for_call(). We guarantee to clients that all other slots * (except the XAX mcontext slot) will remain untouched. * * NOTE : dr_insert_cbr_instrumentation has assumption about the clean call * instrumentation layout, changes to the clean call instrumentation may break * dr_insert_cbr_instrumentation. */ void dr_insert_clean_call_ex_varg(void *drcontext, instrlist_t *ilist, instr_t *where, void *callee, dr_cleancall_save_t save_flags, uint num_args, opnd_t *args) { dcontext_t *dcontext = (dcontext_t *)drcontext; uint dstack_offs, pad = 0; size_t buf_sz = 0; clean_call_info_t cci; /* information for clean call insertion. */ bool save_fpstate = TEST(DR_CLEANCALL_SAVE_FLOAT, save_flags); meta_call_flags_t call_flags = META_CALL_CLEAN | META_CALL_RETURNS; byte *encode_pc; instr_t *label = INSTR_CREATE_label(drcontext); dr_pred_type_t auto_pred = instrlist_get_auto_predicate(ilist); CLIENT_ASSERT(drcontext != NULL, "dr_insert_clean_call: drcontext cannot be NULL"); STATS_INC(cleancall_inserted); LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: insert clean call to " PFX "\n", callee); instrlist_set_auto_predicate(ilist, DR_PRED_NONE); #ifdef ARM if (instr_predicate_is_cond(auto_pred)) { /* auto_predicate is set, though we handle the clean call with a cbr * because we require inserting instrumentation which modifies cpsr. */ MINSERT(ilist, where, XINST_CREATE_jump_cond(drcontext, instr_invert_predicate(auto_pred), opnd_create_instr(label))); } #endif /* analyze the clean call, return true if clean call can be inlined. */ if (analyze_clean_call(dcontext, &cci, where, callee, save_fpstate, TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags), num_args, args) && !TEST(DR_CLEANCALL_ALWAYS_OUT_OF_LINE, save_flags)) { #ifdef CLIENT_INTERFACE /* we can perform the inline optimization and return. 
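         * (analyze_clean_call() judged the callee simple enough to inline and
         * the caller did not force an out-of-line call)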
*/ STATS_INC(cleancall_inlined); LOG(THREAD, LOG_CLEANCALL, 2, "CLEANCALL: inlined callee " PFX "\n", callee); insert_inline_clean_call(dcontext, &cci, ilist, where, args); MINSERT(ilist, where, label); instrlist_set_auto_predicate(ilist, auto_pred); return; #else /* CLIENT_INTERFACE */ ASSERT_NOT_REACHED(); #endif /* CLIENT_INTERFACE */ } /* honor requests from caller */ if (TEST(DR_CLEANCALL_NOSAVE_FLAGS, save_flags)) { /* even if we remove flag saves we want to keep mcontext shape */ cci.preserve_mcontext = true; cci.skip_save_flags = true; /* we assume this implies DF should be 0 already */ cci.skip_clear_flags = true; /* XXX: should also provide DR_CLEANCALL_NOSAVE_NONAFLAGS to * preserve just arith flags on return from a call */ } if (TESTANY(DR_CLEANCALL_NOSAVE_XMM | DR_CLEANCALL_NOSAVE_XMM_NONPARAM | DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) { int i; /* even if we remove xmm saves we want to keep mcontext shape */ cci.preserve_mcontext = true; /* start w/ all */ #if defined(X64) && defined(WINDOWS) cci.num_simd_skip = 6; #else /* all 8 (or 16) are scratch */ cci.num_simd_skip = proc_num_simd_registers(); #endif for (i = 0; i < cci.num_simd_skip; i++) cci.simd_skip[i] = true; /* now remove those used for param/retval */ #ifdef X64 if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONPARAM, save_flags)) { /* xmm0-3 (-7 for linux) are used for params */ for (i = 0; i < IF_UNIX_ELSE(7, 3); i++) cci.simd_skip[i] = false; cci.num_simd_skip -= i; } if (TEST(DR_CLEANCALL_NOSAVE_XMM_NONRET, save_flags)) { /* xmm0 (and xmm1 for linux) are used for retvals */ cci.simd_skip[0] = false; cci.num_simd_skip--; # ifdef UNIX cci.simd_skip[1] = false; cci.num_simd_skip--; # endif } #endif } if (TEST(DR_CLEANCALL_INDIRECT, save_flags)) encode_pc = vmcode_unreachable_pc(); else encode_pc = vmcode_get_start(); dstack_offs = prepare_for_call_ex(dcontext, &cci, ilist, where, encode_pc); #ifdef X64 /* PR 218790: we assume that dr_prepare_for_call() leaves stack 16-byte * aligned, which is what insert_meta_call_vargs requires. */ if (cci.should_align) { CLIENT_ASSERT(ALIGNED(dstack_offs, 16), "internal error: bad stack alignment"); } #endif if (save_fpstate) { /* save on the stack: xref PR 202669 on clients using more stack */ buf_sz = proc_fpstate_save_size(); /* we need 16-byte-alignment */ pad = ALIGN_FORWARD_UINT(dstack_offs, 16) - dstack_offs; IF_X64(CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_int(buf_sz + pad), "dr_insert_clean_call: internal truncation error")); MINSERT(ilist, where, XINST_CREATE_sub(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_INT32((int)(buf_sz + pad)))); dr_insert_save_fpstate(drcontext, ilist, where, opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0, OPSZ_512)); } /* PR 302951: restore state if clean call args reference app memory. * We use a hack here: this is the only instance where we mark as our-mangling * but do not have a translation target set, which indicates to the restore * routines that this is a clean call. If the client adds instrs in the middle * translation will fail; if the client modifies any instr, the our-mangling * flag will disappear and translation will fail. 
*/ instrlist_set_our_mangling(ilist, true); if (TEST(DR_CLEANCALL_RETURNS_TO_NATIVE, save_flags)) call_flags |= META_CALL_RETURNS_TO_NATIVE; insert_meta_call_vargs(dcontext, ilist, where, call_flags, encode_pc, callee, num_args, args); instrlist_set_our_mangling(ilist, false); if (save_fpstate) { dr_insert_restore_fpstate( drcontext, ilist, where, opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0, OPSZ_512)); MINSERT(ilist, where, XINST_CREATE_add(dcontext, opnd_create_reg(REG_XSP), OPND_CREATE_INT32(buf_sz + pad))); } cleanup_after_call_ex(dcontext, &cci, ilist, where, 0, encode_pc); MINSERT(ilist, where, label); instrlist_set_auto_predicate(ilist, auto_pred); } void dr_insert_clean_call_ex(void *drcontext, instrlist_t *ilist, instr_t *where, void *callee, dr_cleancall_save_t save_flags, uint num_args, ...) { opnd_t *args = NULL; if (num_args != 0) { va_list ap; va_start(ap, num_args); convert_va_list_to_opnd(drcontext, &args, num_args, ap); va_end(ap); } dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, save_flags, num_args, args); if (num_args != 0) free_va_opnd_list(drcontext, num_args, args); } DR_API void dr_insert_clean_call(void *drcontext, instrlist_t *ilist, instr_t *where, void *callee, bool save_fpstate, uint num_args, ...) { dr_cleancall_save_t flags = (save_fpstate ? DR_CLEANCALL_SAVE_FLOAT : 0); opnd_t *args = NULL; if (num_args != 0) { va_list ap; va_start(ap, num_args); convert_va_list_to_opnd(drcontext, &args, num_args, ap); va_end(ap); } dr_insert_clean_call_ex_varg(drcontext, ilist, where, callee, flags, num_args, args); if (num_args != 0) free_va_opnd_list(drcontext, num_args, args); } /* Utility routine for inserting a clean call to an instrumentation routine * Returns the size of the data stored on the DR stack (in case the caller * needs to align the stack pointer). XSP and XAX are modified by this call. * * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via * prepare_for_clean_call(). We guarantee to clients that all other slots * (except the XAX mcontext slot) will remain untouched. */ DR_API uint dr_prepare_for_call(void *drcontext, instrlist_t *ilist, instr_t *where) { CLIENT_ASSERT(drcontext != NULL, "dr_prepare_for_call: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_prepare_for_call: drcontext is invalid"); return prepare_for_call_ex((dcontext_t *)drcontext, NULL, ilist, where, vmcode_get_start()); } DR_API void dr_cleanup_after_call(void *drcontext, instrlist_t *ilist, instr_t *where, uint sizeof_param_area) { CLIENT_ASSERT(drcontext != NULL, "dr_cleanup_after_call: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_cleanup_after_call: drcontext is invalid"); cleanup_after_call_ex((dcontext_t *)drcontext, NULL, ilist, where, sizeof_param_area, vmcode_get_start()); } #ifdef CLIENT_INTERFACE DR_API void dr_swap_to_clean_stack(void *drcontext, instrlist_t *ilist, instr_t *where) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_swap_to_clean_stack: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_swap_to_clean_stack: drcontext is invalid"); /* PR 219620: For thread-shared, we need to get the dcontext * dynamically rather than use the constant passed in here. 
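     * (Reading the dcontext out of TLS keeps the generated code correct for
     * whichever thread happens to execute it.)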
*/ if (SCRATCH_ALWAYS_TLS()) { MINSERT(ilist, where, instr_create_save_to_tls(dcontext, SCRATCH_REG0, TLS_REG0_SLOT)); insert_get_mcontext_base(dcontext, ilist, where, SCRATCH_REG0); /* save app xsp, and then bring in dstack to xsp */ MINSERT( ilist, where, instr_create_save_to_dc_via_reg(dcontext, SCRATCH_REG0, REG_XSP, XSP_OFFSET)); /* DSTACK_OFFSET isn't within the upcontext so if it's separate this won't * work right. FIXME - the dcontext accessing routines are a mess of shared * vs. no shared support, separate context vs. no separate context support etc. */ ASSERT_NOT_IMPLEMENTED(!TEST(SELFPROT_DCONTEXT, dynamo_options.protect_mask)); MINSERT(ilist, where, instr_create_restore_from_dc_via_reg(dcontext, SCRATCH_REG0, REG_XSP, DSTACK_OFFSET)); MINSERT(ilist, where, instr_create_restore_from_tls(dcontext, SCRATCH_REG0, TLS_REG0_SLOT)); } else { MINSERT(ilist, where, instr_create_save_to_dcontext(dcontext, REG_XSP, XSP_OFFSET)); MINSERT(ilist, where, instr_create_restore_dynamo_stack(dcontext)); } } DR_API void dr_restore_app_stack(void *drcontext, instrlist_t *ilist, instr_t *where) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_restore_app_stack: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_restore_app_stack: drcontext is invalid"); /* restore stack */ if (SCRATCH_ALWAYS_TLS()) { /* use the register we're about to clobber as scratch space */ insert_get_mcontext_base(dcontext, ilist, where, REG_XSP); MINSERT( ilist, where, instr_create_restore_from_dc_via_reg(dcontext, REG_XSP, REG_XSP, XSP_OFFSET)); } else { MINSERT(ilist, where, instr_create_restore_from_dcontext(dcontext, REG_XSP, XSP_OFFSET)); } } # define SPILL_SLOT_TLS_MAX 2 # define NUM_TLS_SPILL_SLOTS (SPILL_SLOT_TLS_MAX + 1) # define NUM_SPILL_SLOTS (SPILL_SLOT_MAX + 1) /* The three tls slots we make available to clients. We reserve TLS_REG0_SLOT for our * own use in dr convenience routines. Note the +1 is because the max is an array index * (so zero based) while array size is number of slots. We don't need to +1 in * SPILL_SLOT_MC_REG because subtracting SPILL_SLOT_TLS_MAX already accounts for it. */ static const ushort SPILL_SLOT_TLS_OFFS[NUM_TLS_SPILL_SLOTS] = { TLS_REG3_SLOT, TLS_REG2_SLOT, TLS_REG1_SLOT }; static const reg_id_t SPILL_SLOT_MC_REG[NUM_SPILL_SLOTS - NUM_TLS_SPILL_SLOTS] = { # ifdef X86 /* The dcontext reg slots we make available to clients. We reserve XAX and XSP * for our own use in dr convenience routines. */ # ifdef X64 REG_R15, REG_R14, REG_R13, REG_R12, REG_R11, REG_R10, REG_R9, REG_R8, # endif REG_XDI, REG_XSI, REG_XBP, REG_XDX, REG_XCX, REG_XBX # elif defined(AARCHXX) /* DR_REG_R0 is not used here. See prepare_for_clean_call. 
*/ DR_REG_R6, DR_REG_R5, DR_REG_R4, DR_REG_R3, DR_REG_R2, DR_REG_R1 # endif /* X86/ARM */ }; DR_API void dr_save_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg, dr_spill_slot_t slot) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_save_reg: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_save_reg: drcontext is invalid"); CLIENT_ASSERT(slot <= SPILL_SLOT_MAX, "dr_save_reg: invalid spill slot selection"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "dr_save_reg requires pointer-sized gpr"); if (slot <= SPILL_SLOT_TLS_MAX) { ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]); MINSERT(ilist, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(offs), opnd_create_reg(reg))); } else { reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS]; int offs = opnd_get_reg_dcontext_offs(reg_slot); if (SCRATCH_ALWAYS_TLS()) { /* PR 219620: For thread-shared, we need to get the dcontext * dynamically rather than use the constant passed in here. */ reg_id_t tmp = (reg == SCRATCH_REG0) ? SCRATCH_REG1 : SCRATCH_REG0; MINSERT(ilist, where, instr_create_save_to_tls(dcontext, tmp, TLS_REG0_SLOT)); insert_get_mcontext_base(dcontext, ilist, where, tmp); MINSERT(ilist, where, instr_create_save_to_dc_via_reg(dcontext, tmp, reg, offs)); MINSERT(ilist, where, instr_create_restore_from_tls(dcontext, tmp, TLS_REG0_SLOT)); } else { MINSERT(ilist, where, instr_create_save_to_dcontext(dcontext, reg, offs)); } } } /* if want to save 8 or 16-bit reg, must pass in containing ptr-sized reg! */ DR_API void dr_restore_reg(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg, dr_spill_slot_t slot) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_restore_reg: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_restore_reg: drcontext is invalid"); CLIENT_ASSERT(slot <= SPILL_SLOT_MAX, "dr_restore_reg: invalid spill slot selection"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "dr_restore_reg requires a pointer-sized gpr"); if (slot <= SPILL_SLOT_TLS_MAX) { ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]); MINSERT(ilist, where, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(offs))); } else { reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS]; int offs = opnd_get_reg_dcontext_offs(reg_slot); if (SCRATCH_ALWAYS_TLS()) { /* PR 219620: For thread-shared, we need to get the dcontext * dynamically rather than use the constant passed in here. 
*/ /* use the register we're about to clobber as scratch space */ insert_get_mcontext_base(dcontext, ilist, where, reg); MINSERT(ilist, where, instr_create_restore_from_dc_via_reg(dcontext, reg, reg, offs)); } else { MINSERT(ilist, where, instr_create_restore_from_dcontext(dcontext, reg, offs)); } } } DR_API dr_spill_slot_t dr_max_opnd_accessible_spill_slot() { if (SCRATCH_ALWAYS_TLS()) return SPILL_SLOT_TLS_MAX; else return SPILL_SLOT_MAX; } /* creates an opnd to access spill slot slot, slot must be <= * dr_max_opnd_accessible_spill_slot() */ opnd_t reg_spill_slot_opnd(dcontext_t *dcontext, dr_spill_slot_t slot) { if (slot <= SPILL_SLOT_TLS_MAX) { ushort offs = os_tls_offset(SPILL_SLOT_TLS_OFFS[slot]); return opnd_create_tls_slot(offs); } else { reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS]; int offs = opnd_get_reg_dcontext_offs(reg_slot); ASSERT(!SCRATCH_ALWAYS_TLS()); /* client assert above should catch */ return opnd_create_dcontext_field(dcontext, offs); } } DR_API opnd_t dr_reg_spill_slot_opnd(void *drcontext, dr_spill_slot_t slot) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_reg_spill_slot_opnd: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_reg_spill_slot_opnd: drcontext is invalid"); CLIENT_ASSERT(slot <= dr_max_opnd_accessible_spill_slot(), "dr_reg_spill_slot_opnd: slot must be less than " "dr_max_opnd_accessible_spill_slot()"); return reg_spill_slot_opnd(dcontext, slot); } DR_API /* used to read a saved register spill slot from a clean call or a restore_state_event */ reg_t dr_read_saved_reg(void *drcontext, dr_spill_slot_t slot) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); CLIENT_ASSERT(drcontext != NULL, "dr_read_saved_reg: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_read_saved_reg: drcontext is invalid"); CLIENT_ASSERT(slot <= SPILL_SLOT_MAX, "dr_read_saved_reg: invalid spill slot selection"); /* We do allow drcontext to not belong to the current thread, for state restoration * during synchall and other scenarios. */ if (slot <= SPILL_SLOT_TLS_MAX) { ushort offs = SPILL_SLOT_TLS_OFFS[slot]; return *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs); } else { reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS]; return reg_get_value_priv(reg_slot, get_mcontext(dcontext)); } } DR_API /* used to write a saved register spill slot from a clean call */ void dr_write_saved_reg(void *drcontext, dr_spill_slot_t slot, reg_t value) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); CLIENT_ASSERT(drcontext != NULL, "dr_write_saved_reg: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_write_saved_reg: drcontext is invalid"); CLIENT_ASSERT(slot <= SPILL_SLOT_MAX, "dr_write_saved_reg: invalid spill slot selection"); /* We do allow drcontext to not belong to the current thread, for state restoration * during synchall and other scenarios. 
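     * A value written here is what a subsequent dr_restore_reg() on the same
     * slot will load back into the register.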
*/ if (slot <= SPILL_SLOT_TLS_MAX) { ushort offs = SPILL_SLOT_TLS_OFFS[slot]; *(reg_t *)(((byte *)&dcontext->local_state->spill_space) + offs) = value; } else { reg_id_t reg_slot = SPILL_SLOT_MC_REG[slot - NUM_TLS_SPILL_SLOTS]; reg_set_value_priv(reg_slot, get_mcontext(dcontext), value); } } DR_API /** * Inserts into ilist prior to "where" instruction(s) to read into the * general-purpose full-size register reg from the user-controlled drcontext * field for this thread. */ void dr_insert_read_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_insert_read_tls_field: drcontext cannot be NULL"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "must use a pointer-sized general-purpose register"); if (SCRATCH_ALWAYS_TLS()) { /* For thread-shared, since reg must be general-purpose we can * use it as a base pointer (repeatedly). Plus it's already dead. */ MINSERT(ilist, where, instr_create_restore_from_tls(dcontext, reg, TLS_DCONTEXT_SLOT)); MINSERT( ilist, where, instr_create_restore_from_dc_via_reg(dcontext, reg, reg, CLIENT_DATA_OFFSET)); MINSERT(ilist, where, XINST_CREATE_load( dcontext, opnd_create_reg(reg), OPND_CREATE_MEMPTR(reg, offsetof(client_data_t, user_field)))); } else { MINSERT(ilist, where, XINST_CREATE_load( dcontext, opnd_create_reg(reg), OPND_CREATE_ABSMEM(&dcontext->client_data->user_field, OPSZ_PTR))); } } DR_API /** * Inserts into ilist prior to "where" instruction(s) to write the * general-purpose full-size register reg to the user-controlled drcontext field * for this thread. */ void dr_insert_write_tls_field(void *drcontext, instrlist_t *ilist, instr_t *where, reg_id_t reg) { dcontext_t *dcontext = (dcontext_t *)drcontext; CLIENT_ASSERT(drcontext != NULL, "dr_insert_write_tls_field: drcontext cannot be NULL"); CLIENT_ASSERT(reg_is_pointer_sized(reg), "must use a pointer-sized general-purpose register"); if (SCRATCH_ALWAYS_TLS()) { reg_id_t spill = SCRATCH_REG0; if (reg == spill) /* don't need sub-reg test b/c we know it's pointer-sized */ spill = SCRATCH_REG1; MINSERT(ilist, where, instr_create_save_to_tls(dcontext, spill, TLS_REG0_SLOT)); MINSERT(ilist, where, instr_create_restore_from_tls(dcontext, spill, TLS_DCONTEXT_SLOT)); MINSERT(ilist, where, instr_create_restore_from_dc_via_reg(dcontext, spill, spill, CLIENT_DATA_OFFSET)); MINSERT(ilist, where, XINST_CREATE_store( dcontext, OPND_CREATE_MEMPTR(spill, offsetof(client_data_t, user_field)), opnd_create_reg(reg))); MINSERT(ilist, where, instr_create_restore_from_tls(dcontext, spill, TLS_REG0_SLOT)); } else { MINSERT(ilist, where, XINST_CREATE_store( dcontext, OPND_CREATE_ABSMEM(&dcontext->client_data->user_field, OPSZ_PTR), opnd_create_reg(reg))); } } DR_API void dr_save_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where, dr_spill_slot_t slot) { reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0); CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only"); CLIENT_ASSERT(drcontext != NULL, "dr_save_arith_flags: drcontext cannot be NULL"); CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_save_arith_flags: drcontext is invalid"); CLIENT_ASSERT(slot <= SPILL_SLOT_MAX, "dr_save_arith_flags: invalid spill slot selection"); dr_save_reg(drcontext, ilist, where, reg, slot); dr_save_arith_flags_to_reg(drcontext, ilist, where, reg); } DR_API void dr_restore_arith_flags(void *drcontext, instrlist_t *ilist, instr_t *where, dr_spill_slot_t slot) { reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0); 
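    /* The restore goes through the same scratch register (xax on x86) that
     * dr_save_arith_flags() used to hold the flag state.
     */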
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    CLIENT_ASSERT(drcontext != NULL, "dr_restore_arith_flags: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_arith_flags: drcontext is invalid");
    CLIENT_ASSERT(slot <= SPILL_SLOT_MAX,
                  "dr_restore_arith_flags: invalid spill slot selection");
    dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
    dr_restore_reg(drcontext, ilist, where, reg, slot);
}

DR_API
void
dr_save_arith_flags_to_xax(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    dr_save_arith_flags_to_reg(drcontext, ilist, where, reg);
}

DR_API
void
dr_restore_arith_flags_from_xax(void *drcontext, instrlist_t *ilist, instr_t *where)
{
    reg_id_t reg = IF_X86_ELSE(DR_REG_XAX, DR_REG_R0);
    CLIENT_ASSERT(IF_X86_ELSE(true, false), "X86-only");
    dr_restore_arith_flags_from_reg(drcontext, ilist, where, reg);
}

DR_API
void
dr_save_arith_flags_to_reg(void *drcontext, instrlist_t *ilist, instr_t *where,
                           reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_save_arith_flags_to_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_save_arith_flags_to_reg: drcontext is invalid");
# ifdef X86
    CLIENT_ASSERT(reg == DR_REG_XAX,
                  "only xax should be used for saving arith flags on X86");
    /* flag saving code:
     *   lahf
     *   seto al
     */
    MINSERT(ilist, where, INSTR_CREATE_lahf(dcontext));
    MINSERT(ilist, where, INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL)));
# elif defined(ARM)
    /* flag saving code: mrs reg, cpsr */
    MINSERT(
        ilist, where,
        INSTR_CREATE_mrs(dcontext, opnd_create_reg(reg), opnd_create_reg(DR_REG_CPSR)));
# elif defined(AARCH64)
    /* flag saving code: mrs reg, nzcv */
    MINSERT(
        ilist, where,
        INSTR_CREATE_mrs(dcontext, opnd_create_reg(reg), opnd_create_reg(DR_REG_NZCV)));
# endif /* X86/ARM/AARCH64 */
}

DR_API
void
dr_restore_arith_flags_from_reg(void *drcontext, instrlist_t *ilist, instr_t *where,
                                reg_id_t reg)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_restore_arith_flags_from_reg: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_restore_arith_flags_from_reg: drcontext is invalid");
# ifdef X86
    CLIENT_ASSERT(reg == DR_REG_XAX,
                  "only xax should be used for restoring arith flags on X86");
    /* flag restoring code:
     *   add 0x7f,%al
     *   sahf
     */
    /* do an add such that OF will be set only if seto set
     * the LSB of AL to 1
     */
    MINSERT(ilist, where,
            INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f)));
    MINSERT(ilist, where, INSTR_CREATE_sahf(dcontext));
# elif defined(ARM)
    /* flag restoring code: msr apsr_nzcvqg, reg */
    MINSERT(ilist, where,
            INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_CPSR),
                             OPND_CREATE_INT_MSR_NZCVQG(), opnd_create_reg(reg)));
# elif defined(AARCH64)
    /* flag restoring code: msr nzcv, reg */
    MINSERT(
        ilist, where,
        INSTR_CREATE_msr(dcontext, opnd_create_reg(DR_REG_NZCV), opnd_create_reg(reg)));
# endif /* X86/ARM/AARCH64 */
}

/* providing functionality of old -instr_calls and -instr_branches flags
 *
 * NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call().  We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
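 *
 * A minimal matching callee sketch (hypothetical client code, not part of
 * this file): since the clean call below passes the call site and its target
 * as two pointer-sized arguments, the callee can look like
 *   static void at_call(app_pc instr_addr, app_pc target_addr) { ... }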
 */
DR_API
void
dr_insert_call_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                               void *callee)
{
    ptr_uint_t target, address;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_call_instrumentation: drcontext cannot be NULL");
    address = (ptr_uint_t)instr_get_translation(instr);
    /* dr_insert_ubr_instrumentation() uses this function */
    CLIENT_ASSERT(instr_is_call(instr) || instr_is_ubr(instr),
                  "dr_insert_{ubr,call}_instrumentation must be applied to a ubr or call");
    CLIENT_ASSERT(address != 0,
                  "dr_insert_{ubr,call}_instrumentation: can't determine app address");
    if (opnd_is_pc(instr_get_target(instr))) {
        if (opnd_is_far_pc(instr_get_target(instr))) {
            /* FIXME: handle far pc */
            CLIENT_ASSERT(false,
                          "dr_insert_{ubr,call}_instrumentation: far pc not supported");
        }
        /* In release build for far pc keep going assuming 0 base */
        target = (ptr_uint_t)opnd_get_pc(instr_get_target(instr));
    } else if (opnd_is_instr(instr_get_target(instr))) {
        instr_t *tgt = opnd_get_instr(instr_get_target(instr));
        target = (ptr_uint_t)instr_get_translation(tgt);
        CLIENT_ASSERT(target != 0, "dr_insert_{ubr,call}_instrumentation: unknown target");
        if (opnd_is_far_instr(instr_get_target(instr))) {
            /* FIXME: handle far instr */
            CLIENT_ASSERT(false,
                          "dr_insert_{ubr,call}_instrumentation: far instr "
                          "not supported");
        }
    } else {
        CLIENT_ASSERT(false, "dr_insert_{ubr,call}_instrumentation: unknown target");
        target = 0;
    }

    dr_insert_clean_call(drcontext, ilist, instr, callee, false /*no fpstate*/, 2,
                         /* address of call is 1st parameter */
                         OPND_CREATE_INTPTR(address),
                         /* call target is 2nd parameter */
                         OPND_CREATE_INTPTR(target));
}

/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call().  We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.  Since we need another
 * tls spill slot in this routine we require the caller to give us one.
 */
DR_API
void
dr_insert_mbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr,
                              void *callee, dr_spill_slot_t scratch_slot)
{
# ifdef X86
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    ptr_uint_t address = (ptr_uint_t)instr_get_translation(instr);
    opnd_t tls_opnd;
    instr_t *newinst;
    reg_id_t reg_target;

    /* PR 214051: dr_insert_mbr_instrumentation() broken with -indcall2direct */
    CLIENT_ASSERT(!DYNAMO_OPTION(indcall2direct),
                  "dr_insert_mbr_instrumentation not supported with -opt_speed");
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_mbr_instrumentation: drcontext cannot be NULL");
    CLIENT_ASSERT(address != 0,
                  "dr_insert_mbr_instrumentation: can't determine app address");
    CLIENT_ASSERT(instr_is_mbr(instr),
                  "dr_insert_mbr_instrumentation must be applied to an mbr");

    /* We need a TLS spill slot to use.  We can use any tls slot that is opnd
     * accessible. */
    CLIENT_ASSERT(scratch_slot <= dr_max_opnd_accessible_spill_slot(),
                  "dr_insert_mbr_instrumentation: scratch_slot must be less than "
                  "dr_max_opnd_accessible_spill_slot()");

    /* It is possible for an mbr instruction to use the XCX register, so we
     * have to use an unused register. */
    for (reg_target = REG_XAX; reg_target <= REG_XBX; reg_target++) {
        if (!instr_uses_reg(instr, reg_target))
            break;
    }

    /* PR 240265: we disallow clients from adding post-mbr instrumentation, so
     * we avoid doing that here even though it's a little less efficient since
     * our mbr mangling will re-grab the target.
     * We could keep it post-mbr and mark it w/ a special flag so we allow
     * our own but not clients' instrumentation post-mbr: but then we
     * hit post-syscall issues for wow64 where post-mbr equals post-syscall
     * (PR 240258: though we might solve that some other way).
     */

    /* Note that since we're using a client exposed slot we know it will be
     * preserved across the clean call. */
    tls_opnd = dr_reg_spill_slot_opnd(drcontext, scratch_slot);
    newinst = XINST_CREATE_store(dcontext, tls_opnd, opnd_create_reg(reg_target));

    /* PR 214962: ensure we'll properly translate the de-ref of app
     * memory by marking the spill and de-ref as INSTR_OUR_MANGLING.
     */
    instr_set_our_mangling(newinst, true);
    MINSERT(ilist, instr, newinst);

    if (instr_is_return(instr)) {
        /* the retaddr operand is always the final source for all OP_ret* instrs */
        opnd_t retaddr = instr_get_src(instr, instr_num_srcs(instr) - 1);
        opnd_size_t sz = opnd_get_size(retaddr);
        /* Even for far ret and iret, retaddr is at TOS
         * but operand size needs to be set to stack size
         * since iret pops more than the return address.
         */
        opnd_set_size(&retaddr, OPSZ_STACK);
        newinst = instr_create_1dst_1src(dcontext, sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
                                         opnd_create_reg(reg_target), retaddr);
    } else {
        /* call* or jmp* */
        opnd_t src = instr_get_src(instr, 0);
        opnd_size_t sz = opnd_get_size(src);
        /* if a far cti, we can't fit it into a register: asserted above.
         * in release build we'll get just the address here.
         */
        if (instr_is_far_cti(instr)) {
            if (sz == OPSZ_10) {
                sz = OPSZ_8;
            } else if (sz == OPSZ_6) {
                sz = OPSZ_4;
# ifdef X64
                reg_target = reg_64_to_32(reg_target);
# endif
            } else /* target has OPSZ_4 */ {
                sz = OPSZ_2;
            }
            opnd_set_size(&src, sz);
        }
# ifdef UNIX
        /* xref i#1834: the problem with the fs and gs segments is a general
         * problem on Linux; this fix is specific to mbr instrumentation, but
         * a general solution is needed.
         */
        if (INTERNAL_OPTION(mangle_app_seg) && opnd_is_far_base_disp(src)) {
            src = mangle_seg_ref_opnd(dcontext, ilist, instr, src, reg_target);
        }
# endif
        newinst = instr_create_1dst_1src(dcontext, sz == OPSZ_2 ? OP_movzx : OP_mov_ld,
                                         opnd_create_reg(reg_target), src);
    }
    instr_set_our_mangling(newinst, true);
    MINSERT(ilist, instr, newinst);
    /* Now we want the true app state saved, for dr_get_mcontext().
     * We specially recognize our OP_xchg as a restore in
     * instr_is_reg_spill_or_restore().
     */
    MINSERT(ilist, instr,
            INSTR_CREATE_xchg(dcontext, tls_opnd, opnd_create_reg(reg_target)));

    dr_insert_clean_call(drcontext, ilist, instr, callee, false /*no fpstate*/, 2,
                         /* address of mbr is 1st param */
                         OPND_CREATE_INTPTR(address),
                         /* indirect target (in tls, xchg-d from ecx) is 2nd param */
                         tls_opnd);
# elif defined(ARM)
    /* i#1551: NYI on ARM.
     * Also, we may want to split these out into arch/{x86,arm}/ files
     */
    ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
}

/* NOTE : this routine clobbers TLS_XAX_SLOT and the XSP mcontext slot via
 * dr_insert_clean_call().  We guarantee to clients that all other slots
 * (except the XAX mcontext slot) will remain untouched.
 *
 * NOTE : this routine has assumptions about the layout of the clean call,
 * so any change to the clean call instrumentation layout may break this routine.
 */
static void
dr_insert_cbr_instrumentation_help(void *drcontext, instrlist_t *ilist, instr_t *instr,
                                   void *callee, bool has_fallthrough, opnd_t user_data)
{
# ifdef X86
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    ptr_uint_t address, target;
    int opc;
    instr_t *app_flags_ok;
    bool out_of_line_switch = false;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_insert_cbr_instrumentation: drcontext cannot be NULL");
    address = (ptr_uint_t)instr_get_translation(instr);
    CLIENT_ASSERT(address != 0,
                  "dr_insert_cbr_instrumentation: can't determine app address");
    CLIENT_ASSERT(instr_is_cbr(instr),
                  "dr_insert_cbr_instrumentation must be applied to a cbr");
    CLIENT_ASSERT(opnd_is_near_pc(instr_get_target(instr)) ||
                      opnd_is_near_instr(instr_get_target(instr)),
                  "dr_insert_cbr_instrumentation: target opnd must be a near pc or "
                  "near instr");
    if (opnd_is_near_pc(instr_get_target(instr)))
        target = (ptr_uint_t)opnd_get_pc(instr_get_target(instr));
    else if (opnd_is_near_instr(instr_get_target(instr))) {
        instr_t *tgt = opnd_get_instr(instr_get_target(instr));
        target = (ptr_uint_t)instr_get_translation(tgt);
        CLIENT_ASSERT(target != 0, "dr_insert_cbr_instrumentation: unknown target");
    } else {
        CLIENT_ASSERT(false, "dr_insert_cbr_instrumentation: unknown target");
        target = 0;
    }

    app_flags_ok = instr_get_prev(instr);
    if (has_fallthrough) {
        ptr_uint_t fallthrough = address + instr_length(drcontext, instr);
        CLIENT_ASSERT(!opnd_uses_reg(user_data, DR_REG_XBX),
                      "register ebx should not be used");
        CLIENT_ASSERT(fallthrough > address, "wrong fallthrough address");
        dr_insert_clean_call(drcontext, ilist, instr, callee, false /*no fpstate*/, 5,
                             /* push address of cbr onto stack as 1st parameter */
                             OPND_CREATE_INTPTR(address),
                             /* target is 2nd parameter */
                             OPND_CREATE_INTPTR(target),
                             /* fall-through is 3rd parameter */
                             OPND_CREATE_INTPTR(fallthrough),
                             /* branch direction (put in ebx below) is 4th parameter */
                             opnd_create_reg(REG_XBX),
                             /* user defined data is 5th parameter */
                             opnd_is_null(user_data) ? OPND_CREATE_INT32(0) : user_data);
    } else {
        dr_insert_clean_call(drcontext, ilist, instr, callee, false /*no fpstate*/, 3,
                             /* push address of cbr onto stack as 1st parameter */
                             OPND_CREATE_INTPTR(address),
                             /* target is 2nd parameter */
                             OPND_CREATE_INTPTR(target),
                             /* branch direction (put in ebx below) is 3rd parameter */
                             opnd_create_reg(REG_XBX));
    }

    /* calculate whether the branch is taken or not:
     * since the clean call mechanism clobbers eflags, we
     * must insert our checks prior to that clobbering.
     * since we do it AFTER the pusha, we don't have to save; but, we
     * can't use a param that's part of any calling convention b/c w/
     * PR 250976 our clean call will get it from the pusha.
     * ebx is a good choice.
     */
    /* We expect:
       mov    0x400e5e34 -> %esp
       pusha  %esp %eax %ebx %ecx %edx %ebp %esi %edi -> %esp (%esp)
       pushf  %esp -> %esp (%esp)
       push   $0x00000000 %esp -> %esp (%esp)
       popf   %esp (%esp) -> %esp
       mov    0x400e5e40 -> %eax
       push   %eax %esp -> %esp (%esp)
     * We also assume all clean call instrs are expanded.
     */
    /* Because the clean call might be optimized, we cannot assume the sequence.
     * We assume that the clean call will not be inlined for having more than one
     * argument, so we scan to find either a call instr or a popf.
     * if a popf, do as before.
     * if a call, move back to right before push xbx or mov rbx => r3.
     */
    if (app_flags_ok == NULL)
        app_flags_ok = instrlist_first(ilist);
    /* r2065 added out-of-line clean call context switch, so we need to check
     * how the context switch code is inserted.
     */
    while (!instr_opcode_valid(app_flags_ok) ||
           instr_get_opcode(app_flags_ok) != OP_call) {
        app_flags_ok = instr_get_next(app_flags_ok);
        CLIENT_ASSERT(app_flags_ok != NULL,
                      "dr_insert_cbr_instrumentation: cannot find call instr");
        if (instr_get_opcode(app_flags_ok) == OP_popf)
            break;
    }
    if (instr_get_opcode(app_flags_ok) == OP_call) {
        if (opnd_get_pc(instr_get_target(app_flags_ok)) == (app_pc)callee) {
            /* call to clean callee
             * move a few instrs back till right before push xbx, or mov rbx => r3
             */
            while (app_flags_ok != NULL) {
                if (instr_reg_in_src(app_flags_ok, DR_REG_XBX))
                    break;
                app_flags_ok = instr_get_prev(app_flags_ok);
            }
        } else {
            /* call to clean call context save */
            ASSERT(opnd_get_pc(instr_get_target(app_flags_ok)) ==
                   get_clean_call_save(dcontext _IF_X64(GENCODE_X64)));
            out_of_line_switch = true;
        }
        ASSERT(app_flags_ok != NULL);
    }
    /* i#1155: for the out-of-line context switch
     * we insert two parts of code to set up the "taken" arg for the clean call:
     * - compute "taken" and put it onto the stack right before the call to the
     *   context save, where DR already swapped the stack and adjusted xsp to
     *   point beyond the mcontext plus the temp stack size.
     *   It is 2 slots away b/c the 1st is the retaddr.
     * - move the "taken" from the stack to ebx, to be compatible with the
     *   existing code right after the context save returns and before the arg
     *   setup, where xsp points beyond the mcontext (xref emit_clean_call_save).
     *   It is 2 slots + temp stack size away.
     * XXX: we could optimize the code by computing "taken" after the clean call
     * save if the eflags are not cleared.
     */
    /* put our code before the popf or use of xbx */
    opc = instr_get_opcode(instr);
    if (opc == OP_jecxz || opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
        /* for 8-bit cbrs w/ multiple conditions and state, simpler to
         * simply execute them -- they're rare so shouldn't be a perf hit.
         * after all, ecx is saved, can clobber it.
         * we do:
         *   loop/jecxz taken
         *   not_taken: mov 0, ebx
         *              jmp done
         *   taken:     mov 1, ebx
         *   done:
         */
        opnd_t opnd_taken = out_of_line_switch
            ? /* 2 slots away from xsp, xref comment above for i#1155 */
            OPND_CREATE_MEM32(REG_XSP, -2 * (int)XSP_SZ /* ret+taken */)
            : opnd_create_reg(REG_EBX);
        instr_t *branch = instr_clone(dcontext, instr);
        instr_t *not_taken =
            INSTR_CREATE_mov_imm(dcontext, opnd_taken, OPND_CREATE_INT32(0));
        instr_t *taken = INSTR_CREATE_mov_imm(dcontext, opnd_taken, OPND_CREATE_INT32(1));
        instr_t *done = INSTR_CREATE_label(dcontext);
        instr_set_target(branch, opnd_create_instr(taken));
        /* client-added meta instrs should not have translation set */
        instr_set_translation(branch, NULL);
        MINSERT(ilist, app_flags_ok, branch);
        MINSERT(ilist, app_flags_ok, not_taken);
        MINSERT(ilist, app_flags_ok,
                INSTR_CREATE_jmp_short(dcontext, opnd_create_instr(done)));
        MINSERT(ilist, app_flags_ok, taken);
        MINSERT(ilist, app_flags_ok, done);
        if (out_of_line_switch) {
            if (opc == OP_loop || opc == OP_loope || opc == OP_loopne) {
                /* We executed OP_loop* before we saved xcx, so we must restore
                 * it.  We should be able to use OP_lea b/c OP_loop* uses
                 * addr prefix to shrink pointer-sized xcx, not data prefix.
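                 * A descriptive note: the code just below emits the equivalent
                 * of
                 *   lea 0x1(%xcx) -> %xcx
                 * to add back the 1 that the executed OP_loop* subtracted.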
*/ reg_id_t xcx = opnd_get_reg(instr_get_dst(instr, 0)); MINSERT(ilist, app_flags_ok, INSTR_CREATE_lea( dcontext, opnd_create_reg(xcx), opnd_create_base_disp(xcx, DR_REG_NULL, 0, 1, OPSZ_lea))); } ASSERT(instr_get_opcode(app_flags_ok) == OP_call); /* 2 slots + temp_stack_size away from xsp, * xref comment above for i#1155 */ opnd_taken = OPND_CREATE_MEM32( REG_XSP, -2 * (int)XSP_SZ - get_clean_call_temp_stack_size()); MINSERT(ilist, instr_get_next(app_flags_ok), XINST_CREATE_load(dcontext, opnd_create_reg(REG_EBX), opnd_taken)); } } else { /* build a setcc equivalent of instr's jcc operation * WARNING: this relies on order of OP_ enum! */ opnd_t opnd_taken = out_of_line_switch ? /* 2 slots away from xsp, xref comment above for i#1155 */ OPND_CREATE_MEM8(REG_XSP, -2 * (int)XSP_SZ /* ret+taken */) : opnd_create_reg(REG_BL); opc = instr_get_opcode(instr); if (opc <= OP_jnle_short) opc += (OP_jo - OP_jo_short); CLIENT_ASSERT(opc >= OP_jo && opc <= OP_jnle, "dr_insert_cbr_instrumentation: unknown opcode"); opc = opc - OP_jo + OP_seto; MINSERT(ilist, app_flags_ok, INSTR_CREATE_setcc(dcontext, opc, opnd_taken)); if (out_of_line_switch) { app_flags_ok = instr_get_next(app_flags_ok); /* 2 slots + temp_stack_size away from xsp, * xref comment above for i#1155 */ opnd_taken = OPND_CREATE_MEM8( REG_XSP, -2 * (int)XSP_SZ - get_clean_call_temp_stack_size()); } /* movzx ebx <- bl */ MINSERT(ilist, app_flags_ok, INSTR_CREATE_movzx(dcontext, opnd_create_reg(REG_EBX), opnd_taken)); } /* now branch dir is in ebx and will be passed to clean call */ # elif defined(ARM) /* i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); # endif /* X86/ARM */ } DR_API void dr_insert_cbr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr, void *callee) { dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee, false /* no fallthrough */, opnd_create_null()); } DR_API void dr_insert_cbr_instrumentation_ex(void *drcontext, instrlist_t *ilist, instr_t *instr, void *callee, opnd_t user_data) { dr_insert_cbr_instrumentation_help(drcontext, ilist, instr, callee, true /* has fallthrough */, user_data); } DR_API void dr_insert_ubr_instrumentation(void *drcontext, instrlist_t *ilist, instr_t *instr, void *callee) { /* same as call */ dr_insert_call_instrumentation(drcontext, ilist, instr, callee); } /* This may seem like a pretty targeted API function, but there's no * clean way for a client to do this on its own due to DR's * restrictions on bb instrumentation (i#782). */ DR_API bool dr_clobber_retaddr_after_read(void *drcontext, instrlist_t *ilist, instr_t *instr, ptr_uint_t value) { /* the client could be using note fields so we use a label and xfer to * a note field during the mangling pass */ if (instr_is_return(instr)) { instr_t *label = INSTR_CREATE_label(drcontext); dr_instr_label_data_t *data = instr_get_label_data_area(label); /* we could coordinate w/ drmgr and use some reserved note label value * but only if we run out of instr flags. so we set to 0 to not * overlap w/ any client uses (DRMGR_NOTE_NONE == 0). 
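 *
 * Typical use (a hypothetical client sketch): in a bb event, for a return
 * instruction whose retaddr the app is known to re-read,
 *   dr_clobber_retaddr_after_read(drcontext, bb, instr, 0);
 * makes the app see the given sentinel value instead of the real retaddr.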
*/ label->note = 0; /* these values are read back in d_r_mangle() */ data->data[0] = (ptr_uint_t)instr; data->data[1] = value; label->flags |= INSTR_CLOBBER_RETADDR; instr->flags |= INSTR_CLOBBER_RETADDR; instrlist_meta_preinsert(ilist, instr, label); return true; } return false; } DR_API bool dr_mcontext_xmm_fields_valid(void) { return preserve_xmm_caller_saved(); } DR_API bool dr_mcontext_zmm_fields_valid(void) { # ifdef X86 return d_r_is_avx512_code_in_use(); # else return false; # endif } #endif /* CLIENT_INTERFACE */ /* dr_get_mcontext() needed for translating clean call arg errors */ /* Fills in whichever of dmc or mc is non-NULL */ bool dr_get_mcontext_priv(dcontext_t *dcontext, dr_mcontext_t *dmc, priv_mcontext_t *mc) { priv_mcontext_t *state; CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)), "DR context protection NYI"); if (mc == NULL) { CLIENT_ASSERT(dmc != NULL, "invalid context"); /* catch uses that forget to set size: perhaps in a few releases, * when most old clients have been converted, remove this (we'll * still return false) */ CLIENT_ASSERT(dmc->size == sizeof(dr_mcontext_t), "dr_mcontext_t.size field not set properly"); CLIENT_ASSERT(dmc->flags != 0 && (dmc->flags & ~(DR_MC_ALL)) == 0, "dr_mcontext_t.flags field not set properly"); } else CLIENT_ASSERT(dmc == NULL, "invalid internal params"); #ifdef CLIENT_INTERFACE /* i#117/PR 395156: support getting mcontext from events where mcontext is * stable. It would be nice to support it from init and 1st thread init, * but the mcontext is not available at those points. * * Since DR calls this routine when recreating state and wants the * clean call version, can't distinguish by whereami=DR_WHERE_FCACHE, * so we set a flag in the supported events. If client routine * crashes and we recreate then we want clean call version anyway * so should be ok. Note that we want in_pre_syscall for other * reasons (dr_syscall_set_param() for Windows) so we keep it a * separate flag. */ /* no support for init or initial thread init */ if (!dynamo_initialized) return false; if (dcontext->client_data->cur_mc != NULL) { if (mc != NULL) *mc = *dcontext->client_data->cur_mc; else if (!priv_mcontext_to_dr_mcontext(dmc, dcontext->client_data->cur_mc)) return false; return true; } if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) { return os_context_to_mcontext(dmc, mc, dcontext->client_data->os_cxt); } if (dcontext->client_data->suspended) { /* A thread suspended by dr_suspend_all_other_threads() has its * context translated lazily here. * We cache the result in cur_mc to avoid a translation cost next time. 
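         *
         * A caller-side sketch of that pattern (hypothetical client code):
         *   void **dcs; uint num, i;
         *   if (dr_suspend_all_other_threads(&dcs, &num, NULL)) {
         *       dr_mcontext_t mc = { sizeof(mc), DR_MC_CONTROL | DR_MC_INTEGER };
         *       for (i = 0; i < num; i++)
         *           dr_get_mcontext(dcs[i], &mc);
         *       dr_resume_all_other_threads(dcs, num);
         *   }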
*/ bool res; priv_mcontext_t *mc_xl8; if (mc != NULL) mc_xl8 = mc; else { dcontext->client_data->cur_mc = (priv_mcontext_t *)heap_alloc( dcontext, sizeof(*dcontext->client_data->cur_mc) HEAPACCT(ACCT_CLIENT)); /* We'll clear this cache in dr_resume_all_other_threads() */ mc_xl8 = dcontext->client_data->cur_mc; } res = thread_get_mcontext(dcontext->thread_record, mc_xl8); CLIENT_ASSERT(res, "failed to get mcontext of suspended thread"); res = translate_mcontext(dcontext->thread_record, mc_xl8, false /*do not restore memory*/, NULL); CLIENT_ASSERT(res, "failed to xl8 mcontext of suspended thread"); if (mc == NULL && !priv_mcontext_to_dr_mcontext(dmc, mc_xl8)) return false; return true; } /* PR 207947: support mcontext access from syscall events */ if (dcontext->client_data->mcontext_in_dcontext || dcontext->client_data->in_pre_syscall || dcontext->client_data->in_post_syscall) { if (mc != NULL) *mc = *get_mcontext(dcontext); else if (!priv_mcontext_to_dr_mcontext(dmc, get_mcontext(dcontext))) return false; return true; } #endif /* dr_prepare_for_call() puts the machine context on the dstack * with pusha and pushf, but only fills in xmm values for * preserve_xmm_caller_saved(): however, we tell the client that the xmm * fields are not valid otherwise. so, we just have to copy the * state from the dstack. */ state = get_priv_mcontext_from_dstack(dcontext); if (mc != NULL) *mc = *state; else if (!priv_mcontext_to_dr_mcontext(dmc, state)) return false; /* esp is a dstack value -- get the app stack's esp from the dcontext */ if (mc != NULL) mc->xsp = get_mcontext(dcontext)->xsp; else if (TEST(DR_MC_CONTROL, dmc->flags)) dmc->xsp = get_mcontext(dcontext)->xsp; #ifdef ARM if (TEST(DR_MC_INTEGER, dmc->flags)) { /* get the stolen register's app value */ if (mc != NULL) { set_stolen_reg_val(mc, (reg_t)d_r_get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT))); } else { set_stolen_reg_val(dr_mcontext_as_priv_mcontext(dmc), (reg_t)d_r_get_tls(os_tls_offset(TLS_REG_STOLEN_SLOT))); } } #endif /* XXX: should we set the pc field? * If we do we'll have to adopt a different solution for i#1685 in our Windows * hooks where today we use the pc slot for temp storage. 
 */
    return true;
}

DR_API
bool
dr_get_mcontext(void *drcontext, dr_mcontext_t *dmc)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    return dr_get_mcontext_priv(dcontext, dmc, NULL);
}

#ifdef CLIENT_INTERFACE
DR_API
bool
dr_set_mcontext(void *drcontext, dr_mcontext_t *context)
{
    priv_mcontext_t *state;
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    IF_ARM(reg_t reg_val = 0 /* silence the compiler warning */;)
    CLIENT_ASSERT(!TEST(SELFPROT_DCONTEXT, DYNAMO_OPTION(protect_mask)),
                  "DR context protection NYI");
    CLIENT_ASSERT(context != NULL, "invalid context");
    CLIENT_ASSERT(context->size == sizeof(dr_mcontext_t),
                  "dr_mcontext_t.size field not set properly");
    CLIENT_ASSERT(context->flags != 0 && (context->flags & ~(DR_MC_ALL)) == 0,
                  "dr_mcontext_t.flags field not set properly");

    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    /* PR 207947: support mcontext access from syscall events */
    if (dcontext->client_data->mcontext_in_dcontext ||
        dcontext->client_data->in_pre_syscall || dcontext->client_data->in_post_syscall) {
        if (!dr_mcontext_to_priv_mcontext(get_mcontext(dcontext), context))
            return false;
        return true;
    }
    if (dcontext->client_data->cur_mc != NULL) {
        return dr_mcontext_to_priv_mcontext(dcontext->client_data->cur_mc, context);
    }
    if (!is_os_cxt_ptr_null(dcontext->client_data->os_cxt)) {
        /* It would be nice to fail for #DR_XFER_CALLBACK_RETURN but we'd need to
         * store yet more state to do so.  The pc will be ignored, and xsi
         * changes will likely cause crashes.
         */
        return mcontext_to_os_context(dcontext->client_data->os_cxt, context, NULL);
    }

    /* copy the machine context to the dstack area created with
     * dr_prepare_for_call().  note that xmm0-5 copied there
     * will override any save_fpstate xmm values, as desired.
     */
    state = get_priv_mcontext_from_dstack(dcontext);
# ifdef ARM
    if (TEST(DR_MC_INTEGER, context->flags)) {
        /* Set the stolen register's app value in TLS, not on the stack (we rely
         * on our stolen reg retaining its value on the stack)
         */
        priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(context);
        d_r_set_tls(os_tls_offset(TLS_REG_STOLEN_SLOT), (void *)get_stolen_reg_val(mc));
        /* save the reg val on the stack to be clobbered by the copy below */
        reg_val = get_stolen_reg_val(state);
    }
# endif
    if (!dr_mcontext_to_priv_mcontext(state, context))
        return false;
# ifdef ARM
    if (TEST(DR_MC_INTEGER, context->flags)) {
        /* restore the reg val on the stack clobbered by the copy above */
        set_stolen_reg_val(state, reg_val);
    }
# endif

    if (TEST(DR_MC_CONTROL, context->flags)) {
        /* esp will be restored from a field in the dcontext */
        get_mcontext(dcontext)->xsp = context->xsp;
    }

    /* XXX: should we support setting the pc field? */

    return true;
}

DR_API
bool
dr_redirect_execution(dr_mcontext_t *mcontext)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL);
    CLIENT_ASSERT(mcontext->size == sizeof(dr_mcontext_t),
                  "dr_mcontext_t.size field not set properly");
    CLIENT_ASSERT(mcontext->flags == DR_MC_ALL, "dr_mcontext_t.flags must be DR_MC_ALL");

    /* PR 352429: squash current trace.
     * FIXME: will clients use this so much that this will be a perf issue?
     * samples/cbr doesn't hit this even at -trace_threshold 1
     */
    if (is_building_trace(dcontext)) {
        LOG(THREAD, LOG_INTERP, 1, "squashing trace-in-progress\n");
        trace_abort(dcontext);
    }

    dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
    dcontext->whereami = DR_WHERE_FCACHE;
    set_last_exit(dcontext, (linkstub_t *)get_client_linkstub());
# ifdef CLIENT_INTERFACE
    if (kernel_xfer_callbacks.num > 0) {
        /* This can only be called from a clean call or an exception event.
         * For both of those we can get the current mcontext via dr_get_mcontext()
         * (the latter b/c we explicitly store to cur_mc just for this use case).
         */
        dr_mcontext_t src_dmc;
        src_dmc.size = sizeof(src_dmc);
        src_dmc.flags = DR_MC_CONTROL | DR_MC_INTEGER;
        dr_get_mcontext(dcontext, &src_dmc);
        if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT, osc_empty,
                                   &src_dmc, NULL, dcontext->next_tag, mcontext->xsp,
                                   osc_empty, dr_mcontext_as_priv_mcontext(mcontext), 0))
            dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
    }
# endif

    transfer_to_dispatch(dcontext, dr_mcontext_as_priv_mcontext(mcontext),
                         true /*full_DR_state*/);
    /* on success we won't get here */
    return false;
}

DR_API
byte *
dr_redirect_native_target(void *drcontext)
{
# ifdef PROGRAM_SHEPHERDING
    /* This feature is unavail for prog shep b/c of the cross-ib-type pollution,
     * as well as the lack of source tag info when exiting the ibl (i#1150).
     */
    return NULL;
# else
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_redirect_native_target(): drcontext cannot be NULL");
    /* The client has no way to know the mode of our gencode so we set LSB here */
    return PC_AS_JMP_TGT(DEFAULT_ISA_MODE, get_client_ibl_xfer_entry(dcontext));
# endif
}

/***************************************************************************
 * ADAPTIVE OPTIMIZATION SUPPORT
 *
 * *Note: for non-owning-thread support (i.e., sideline) all methods assume
 * the dcontext is valid; the client will have to ensure this with a lock
 * on thread_exit!!
 *
 * *need a way for a side thread to get a dcontext to use for logging and mem
 * alloc; before doing that, should think more about mem alloc in/for adaptive
 * routines
 *
 * *made local mem alloc by a side thread safe (see heap.c)
 *
 * *logging is not safe if not the owning thread?
 */

DR_API
/* Schedules the fragment to be deleted.  Once this call is completed,
 * an existing executing fragment is allowed to complete, but control
 * will not enter the fragment again before it is deleted.
 *
 * NOTE: this comment used to say, "after deletion, control may still
 * reach the fragment by indirect branch.".  We believe this is now only
 * true for shared fragments, which are not currently supported.
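 *
 * Example (a hypothetical client sketch; -thread_private only):
 *   if (dr_fragment_exists_at(drcontext, tag))
 *       dr_delete_fragment(drcontext, tag);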
*/ bool dr_delete_fragment(void *drcontext, void *tag) { dcontext_t *dcontext = (dcontext_t *)drcontext; fragment_t *f; bool deletable = false, waslinking; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(), "dr_delete_fragment() only valid with -thread_private"); CLIENT_ASSERT(drcontext != NULL, "dr_delete_fragment(): drcontext cannot be NULL"); /* i#1989: there's no easy way to get a translation without a proper dcontext */ CLIENT_ASSERT(!fragment_thread_exited(dcontext), "dr_delete_fragment not supported from the thread exit event"); if (fragment_thread_exited(dcontext)) return false; waslinking = is_couldbelinking(dcontext); if (!waslinking) enter_couldbelinking(dcontext, NULL, false); # ifdef CLIENT_SIDELINE d_r_mutex_lock(&(dcontext->client_data->sideline_mutex)); fragment_get_fragment_delete_mutex(dcontext); # else CLIENT_ASSERT(drcontext == get_thread_private_dcontext(), "dr_delete_fragment(): drcontext does not belong to current thread"); # endif f = fragment_lookup(dcontext, tag); if (f != NULL && (f->flags & FRAG_CANNOT_DELETE) == 0) { client_todo_list_t *todo = HEAP_TYPE_ALLOC(dcontext, client_todo_list_t, ACCT_CLIENT, UNPROTECTED); client_todo_list_t *iter = dcontext->client_data->to_do; todo->next = NULL; todo->ilist = NULL; todo->tag = tag; if (iter == NULL) dcontext->client_data->to_do = todo; else { while (iter->next != NULL) iter = iter->next; iter->next = todo; } deletable = true; /* unlink fragment so will return to dynamo and delete. * Do not remove the fragment from the hashtable -- * we need to be able to look up the fragment when * inspecting the to_do list in d_r_dispatch. */ if ((f->flags & FRAG_LINKED_INCOMING) != 0) unlink_fragment_incoming(dcontext, f); fragment_remove_from_ibt_tables(dcontext, f, false); } # ifdef CLIENT_SIDELINE fragment_release_fragment_delete_mutex(dcontext); d_r_mutex_unlock(&(dcontext->client_data->sideline_mutex)); # endif if (!waslinking) enter_nolinking(dcontext, NULL, false); return deletable; } DR_API /* Schedules the fragment at 'tag' for replacement. Once this call is * completed, an existing executing fragment is allowed to complete, * but control will not enter the fragment again before it is replaced. * * NOTE: this comment used to say, "after replacement, control may still * reach the fragment by indirect branch.". We believe this is now only * true for shared fragments, which are not currently supported. * * Takes control of the ilist and all responsibility for deleting it and the * instrs inside of it. The client should not keep, use, reference, etc. the * instrlist or any of the instrs it contains after they are passed in. 
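 *
 * Sketch (hypothetical client code): build a replacement ilist and hand
 * ownership to DR; resume_pc here is a hypothetical continuation address:
 *   instrlist_t *repl = instrlist_create(drcontext);
 *   instrlist_append(repl, XINST_CREATE_nop(drcontext));
 *   instrlist_append(repl, XINST_CREATE_jump(drcontext, opnd_create_pc(resume_pc)));
 *   dr_replace_fragment(drcontext, tag, repl);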
 */
bool
dr_replace_fragment(void *drcontext, void *tag, instrlist_t *ilist)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    bool frag_found, waslinking;
    fragment_t *f;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    CLIENT_ASSERT(!SHARED_FRAGMENTS_ENABLED(),
                  "dr_replace_fragment() only valid with -thread_private");
    CLIENT_ASSERT(drcontext != NULL, "dr_replace_fragment(): drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_replace_fragment: drcontext is invalid");
    /* i#1989: there's no easy way to get a translation without a proper dcontext */
    CLIENT_ASSERT(!fragment_thread_exited(dcontext),
                  "dr_replace_fragment not supported from the thread exit event");
    if (fragment_thread_exited(dcontext))
        return false;
    waslinking = is_couldbelinking(dcontext);
    if (!waslinking)
        enter_couldbelinking(dcontext, NULL, false);
# ifdef CLIENT_SIDELINE
    d_r_mutex_lock(&(dcontext->client_data->sideline_mutex));
    fragment_get_fragment_delete_mutex(dcontext);
# else
    CLIENT_ASSERT(drcontext == get_thread_private_dcontext(),
                  "dr_replace_fragment(): drcontext does not belong to current thread");
# endif
    f = fragment_lookup(dcontext, tag);
    frag_found = (f != NULL);
    if (frag_found) {
        client_todo_list_t *iter = dcontext->client_data->to_do;
        client_todo_list_t *todo =
            HEAP_TYPE_ALLOC(dcontext, client_todo_list_t, ACCT_CLIENT, UNPROTECTED);
        todo->next = NULL;
        todo->ilist = ilist;
        todo->tag = tag;
        if (iter == NULL)
            dcontext->client_data->to_do = todo;
        else {
            while (iter->next != NULL)
                iter = iter->next;
            iter->next = todo;
        }
        /* unlink the fragment so control will return to dynamo and replace it
         * the next time it's executed
         */
        if ((f->flags & FRAG_LINKED_INCOMING) != 0)
            unlink_fragment_incoming(dcontext, f);
        fragment_remove_from_ibt_tables(dcontext, f, false);
    }
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
    d_r_mutex_unlock(&(dcontext->client_data->sideline_mutex));
# endif
    if (!waslinking)
        enter_nolinking(dcontext, NULL, false);
    return frag_found;
}

# ifdef UNSUPPORTED_API
/* FIXME - doesn't work with shared fragments.  Consider removing since dr_flush_region
 * and dr_delay_flush_region give us most of this functionality. */
DR_API
/* Flushes all fragments containing 'flush_tag', or the entire code
 * cache if flush_tag is NULL.  'curr_tag' must specify the tag of the
 * currently-executing fragment.  If curr_tag is NULL, flushing can be
 * delayed indefinitely.  Note that flushing is performed across all
 * threads, but other threads may continue to execute fragments
 * containing 'curr_tag' until those fragments finish.
 */
void
dr_flush_fragments(void *drcontext, void *curr_tag, void *flush_tag)
{
    client_flush_req_t *iter, *flush;
    dcontext_t *dcontext = (dcontext_t *)drcontext;

    /* We want to unlink the currently executing fragment so we'll
     * force a context switch to DR.  That way, we'll perform the
     * flush as soon as possible.  Unfortunately, the client may not
     * know the tag of the current trace.  Therefore, we unlink all
     * fragments in the region.
     *
     * Note that we aren't unlinking or ibl-invalidating (i.e., making
     * unreachable) any fragments in other threads containing curr_tag
     * until the delayed flush happens in enter_nolinking().
*/ if (curr_tag != NULL) vm_area_unlink_incoming(dcontext, (app_pc)curr_tag); flush = HEAP_TYPE_ALLOC(dcontext, client_flush_req_t, ACCT_CLIENT, UNPROTECTED); flush->flush_callback = NULL; if (flush_tag == NULL) { flush->start = UNIVERSAL_REGION_BASE; flush->size = UNIVERSAL_REGION_SIZE; } else { flush->start = (app_pc)flush_tag; flush->size = 1; } flush->next = NULL; iter = dcontext->client_data->flush_list; if (iter == NULL) { dcontext->client_data->flush_list = flush; } else { while (iter->next != NULL) iter = iter->next; iter->next = flush; } } # endif /* UNSUPPORTED_API */ DR_API /* Flush all fragments that contain code from the region [start, start+size). * Uses a synchall flush to guarantee that no execution occurs out of the fragments * flushed once this returns. Requires caller to be holding no locks (dr or client) and * to be !couldbelinking (xref PR 199115, 227619). Caller must use * dr_redirect_execution() to return to the cache. */ bool dr_flush_region(app_pc start, size_t size) { dcontext_t *dcontext = get_thread_private_dcontext(); CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL); LOG(THREAD, LOG_FRAGMENT, 2, "%s: " PFX "-" PFX "\n", __FUNCTION__, start, start + size); /* Flush requires !couldbelinking. FIXME - not all event callbacks to the client are * !couldbelinking (see PR 227619) restricting where this routine can be used. */ CLIENT_ASSERT(!is_couldbelinking(dcontext), "dr_flush_region: called from an event " "callback that doesn't support calling this routine; see header file " "for restrictions."); /* Flush requires caller to hold no locks that might block a couldbelinking thread * (which includes almost all dr locks). FIXME - some event callbacks are holding * dr locks (see PR 227619) so can't call this routine. Since we are going to use * a synchall flush, holding client locks is disallowed too (could block a thread * at an unsafe spot for synch). */ CLIENT_ASSERT(OWN_NO_LOCKS(dcontext), "dr_flush_region: caller owns a client " "lock or was called from an event callback that doesn't support " "calling this routine; see header file for restrictions."); CLIENT_ASSERT(size != 0, "dr_flush_region: 0 is invalid size for flush"); /* release build check of requirements, as many as possible at least */ if (size == 0 || is_couldbelinking(dcontext)) return false; if (!executable_vm_area_executed_from(start, start + size)) return true; flush_fragments_from_region(dcontext, start, size, true /*force synchall*/); return true; } DR_API /* Flush all fragments that contain code from the region [start, start+size). * Uses an unlink flush which guarantees that no thread will enter a fragment that was * flushed once this returns (threads already in a flushed fragment will continue). * Requires caller to be holding no locks (dr or client) and to be !couldbelinking * (xref PR 199115, 227619). */ bool dr_unlink_flush_region(app_pc start, size_t size) { dcontext_t *dcontext = get_thread_private_dcontext(); CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL); LOG(THREAD, LOG_FRAGMENT, 2, "%s: " PFX "-" PFX "\n", __FUNCTION__, start, start + size); /* This routine won't work with coarse_units */ CLIENT_ASSERT(!DYNAMO_OPTION(coarse_units), /* as of now, coarse_units are always disabled with -thread_private. */ "dr_unlink_flush_region is not supported with -opt_memory unless " "-thread_private or -enable_full_api is also specified"); /* Flush requires !couldbelinking. 
FIXME - not all event callbacks to the client are
 * !couldbelinking (see PR 227619), restricting where this routine can be used.
 */
    CLIENT_ASSERT(!is_couldbelinking(dcontext),
                  "dr_unlink_flush_region: called from an event "
                  "callback that doesn't support calling this routine; see header file "
                  "for restrictions.");
    /* Flush requires caller to hold no locks that might block a couldbelinking thread
     * (which includes almost all dr locks).  FIXME - some event callbacks are holding
     * dr locks (see PR 227619) so can't call this routine.  FIXME - some event callbacks
     * are couldbelinking (see PR 227619) so can't allow the caller to hold any client
     * locks that could block threads in one of those events (otherwise we don't need
     * to care about client locks)
     */
    CLIENT_ASSERT(OWN_NO_LOCKS(dcontext),
                  "dr_unlink_flush_region: caller owns a client "
                  "lock or was called from an event callback that doesn't support "
                  "calling this routine; see header file for restrictions.");
    CLIENT_ASSERT(size != 0, "dr_unlink_flush_region: 0 is invalid size for flush");

    /* release build check of requirements, as many as possible at least */
    if (size == 0 || is_couldbelinking(dcontext))
        return false;

    if (!executable_vm_area_executed_from(start, start + size))
        return true;

    flush_fragments_from_region(dcontext, start, size, false /*don't force synchall*/);

    return true;
}

DR_API
/* Flush all fragments that contain code from the region [start, start+size) at the next
 * convenient time.  Unlike dr_flush_region() this routine has no restrictions on lock
 * or couldbelinking status; the downside is that the delay till the flush actually
 * occurs is unbounded (FIXME - we could do something safely here to try to speed it
 * up like unlinking shared_syscall etc.), but should occur before any new code is
 * executed or any nudges are processed.
 */
bool
dr_delay_flush_region(app_pc start, size_t size, uint flush_id,
                      void (*flush_completion_callback)(int flush_id))
{
    client_flush_req_t *flush;

    LOG(THREAD_GET, LOG_FRAGMENT, 2, "%s: " PFX "-" PFX "\n", __FUNCTION__, start,
        start + size);

    if (size == 0) {
        CLIENT_ASSERT(false, "dr_delay_flush_region: 0 is invalid size for flush");
        return false;
    }

    /* With the new module load event at 1st execution (i#884), we get a lot of
     * flush requests during creation of a bb from things like drwrap_replace().
     * To avoid them flushing from a new module we check overlap up front here.
     */
    if (!executable_vm_area_executed_from(start, start + size)) {
        return true;
    }

    /* FIXME - would be nice if we could check the requirements and call
     * dr_unlink_flush_region() here if it's safe.  It is difficult to detect non-dr
     * locks that could block a couldbelinking thread though.
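     *
     * Caller-side sketch (hypothetical client code): flush a region just
     * unpatched, accepting the unbounded delay:
     *   dr_delay_flush_region(start, size, my_id, my_flush_done);
     * where my_flush_done(int flush_id) merely records completion (my_id and
     * my_flush_done are hypothetical names).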
     */
    flush = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
    memset(flush, 0x0, sizeof(client_flush_req_t));
    flush->start = (app_pc)start;
    flush->size = size;
    flush->flush_id = flush_id;
    flush->flush_callback = flush_completion_callback;

    d_r_mutex_lock(&client_flush_request_lock);
    flush->next = client_flush_requests;
    client_flush_requests = flush;
    d_r_mutex_unlock(&client_flush_request_lock);

    return true;
}

DR_API
/* returns whether or not there is a fragment in the drcontext fcache at tag */
bool
dr_fragment_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
# ifdef CLIENT_SIDELINE
    fragment_get_fragment_delete_mutex(dcontext);
# endif
    f = fragment_lookup(dcontext, tag);
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
# endif
    return f != NULL;
}

DR_API
bool
dr_bb_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f = fragment_lookup(dcontext, tag);
    if (f != NULL && !TEST(FRAG_IS_TRACE, f->flags)) {
        return true;
    }
    return false;
}

DR_API
/* Looks up the fragment associated with the application pc tag.
 * If not found, returns 0.
 * If found, returns the total size occupied in the cache by the fragment.
 */
uint
dr_fragment_size(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    int size = 0;
    CLIENT_ASSERT(drcontext != NULL, "dr_fragment_size: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_fragment_size: drcontext is invalid");
# ifdef CLIENT_SIDELINE
    /* used to check whether this is the owning thread and, if so, skip the lock;
     * but that check (d_r_get_thread_id() == dcontext->owning_thread) is more
     * expensive than just getting the lock
     */
    fragment_get_fragment_delete_mutex(dcontext);
# endif
    f = fragment_lookup(dcontext, tag);
    if (f == NULL)
        size = 0;
    else
        size = f->size;
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
# endif
    return size;
}

DR_API
/* Retrieves the application PC of a fragment */
app_pc
dr_fragment_app_pc(void *tag)
{
# ifdef WINDOWS
    tag = get_app_pc_from_intercept_pc_if_necessary((app_pc)tag);
    CLIENT_ASSERT(tag != NULL, "dr_fragment_app_pc shouldn't be NULL");
    DODEBUG({
        /* Without -hide our DllMain routine ends up in the cache (xref PR 223120).
         * On Linux fini() ends up in the cache.
         */
        if (DYNAMO_OPTION(hide) && is_dynamo_address(tag) &&
            /* support client interpreting code out of its library */
            !is_in_client_lib(tag)) {
            /* downgraded from assert for client interpreting its own generated code */
            SYSLOG_INTERNAL_WARNING_ONCE("dr_fragment_app_pc is a DR/client pc");
        }
    });
# elif defined(LINUX) && defined(X86_32)
    /* Point back at our hook, undoing the bb shift for SA_RESTART (i#2659).
*/ if ((app_pc)tag == vsyscall_sysenter_displaced_pc) tag = vsyscall_sysenter_return_pc; # endif return tag; } DR_API /* i#268: opposite of dr_fragment_app_pc() */ app_pc dr_app_pc_for_decoding(app_pc pc) { # ifdef WINDOWS app_pc displaced; if (is_intercepted_app_pc(pc, &displaced)) return displaced; # endif return pc; } DR_API app_pc dr_app_pc_from_cache_pc(byte *cache_pc) { app_pc res = NULL; dcontext_t *dcontext = get_thread_private_dcontext(); bool waslinking; CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode"); ASSERT(dcontext != NULL); /* i#1989: there's no easy way to get a translation without a proper dcontext */ CLIENT_ASSERT(!fragment_thread_exited(dcontext), "dr_app_pc_from_cache_pc not supported from the thread exit event"); if (fragment_thread_exited(dcontext)) return NULL; waslinking = is_couldbelinking(dcontext); if (!waslinking) enter_couldbelinking(dcontext, NULL, false); /* suppress asserts about faults in meta instrs */ DODEBUG({ dcontext->client_data->is_translating = true; }); res = recreate_app_pc(dcontext, cache_pc, NULL); DODEBUG({ dcontext->client_data->is_translating = false; }); if (!waslinking) enter_nolinking(dcontext, NULL, false); return res; } DR_API bool dr_using_app_state(void *drcontext) { dcontext_t *dcontext = (dcontext_t *)drcontext; return os_using_app_state(dcontext); } DR_API void dr_switch_to_app_state(void *drcontext) { dr_switch_to_app_state_ex(drcontext, DR_STATE_ALL); } DR_API void dr_switch_to_app_state_ex(void *drcontext, dr_state_flags_t flags) { dcontext_t *dcontext = (dcontext_t *)drcontext; os_swap_context(dcontext, true /*to app*/, flags); } DR_API void dr_switch_to_dr_state(void *drcontext) { dr_switch_to_dr_state_ex(drcontext, DR_STATE_ALL); } DR_API void dr_switch_to_dr_state_ex(void *drcontext, dr_state_flags_t flags) { dcontext_t *dcontext = (dcontext_t *)drcontext; os_swap_context(dcontext, false /*to dr*/, flags); } /*************************************************************************** * CUSTOM TRACES SUPPORT * *could use a method to unmark a trace head, would be nice if DR * notified the client when it marked a trace head and gave the client a * chance to override its decision */ DR_API /* Marks the fragment associated with the application pc tag as * a trace head. The fragment need not exist yet -- once it is * created it will be marked as a trace head. * * DR associates a counter with a trace head and once it * passes the -hot_threshold parameter, DR begins building * a trace. Before each fragment is added to the trace, DR * calls the client routine dr_end_trace to determine whether * to end the trace. (dr_end_trace will be called both for * standard DR traces and for client-defined traces.) * * Note, some fragments are unsuitable for trace heads. DR will * ignore attempts to mark such fragments as trace heads and will return * false. If the client marks a fragment that doesn't exist yet as a trace * head and DR later determines that the fragment is unsuitable for * a trace head it will unmark the fragment as a trace head without * notifying the client. * * Returns true if the target fragment is marked as a trace head. * * If coarse, headness depends on path: currently this will only have * links from tag's coarse unit unlinked. */ bool /* FIXME: dynamorio_app_init returns an int! 
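 *
 * Client-side sketch (hypothetical): mark a known-hot target from a clean
 * call or an end-trace event:
 *   dr_mark_trace_head(dr_get_current_drcontext(), (void *)hot_target_pc);
 * (hot_target_pc is a hypothetical name.)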
 */
dr_mark_trace_head(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    fragment_t coarse_f;
    bool success = true;
    CLIENT_ASSERT(drcontext != NULL, "dr_mark_trace_head: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_mark_trace_head: drcontext is invalid");
    /* Required to make the future-fragment lookup and add atomic and for
     * mark_trace_head.  We have to grab before fragment_delete_mutex so
     * we pay the cost of acquiring up front even when f->flags doesn't
     * require it.
     */
    SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, acquire, change_linking_lock);
# ifdef CLIENT_SIDELINE
    /* used to check whether this is the owning thread and, if so, skip the lock;
     * but that check (d_r_get_thread_id() == dcontext->owning_thread) is more
     * expensive than just getting the lock
     */
    fragment_get_fragment_delete_mutex(dcontext);
# endif
    f = fragment_lookup_fine_and_coarse(dcontext, tag, &coarse_f, NULL);
    if (f == NULL) {
        future_fragment_t *fut;
        fut = fragment_lookup_future(dcontext, tag);
        if (fut == NULL) {
            /* need to create a future fragment */
            fut = fragment_create_and_add_future(dcontext, tag, FRAG_IS_TRACE_HEAD);
        } else {
            /* don't call mark_trace_head, it will try to do some linking */
            fut->flags |= FRAG_IS_TRACE_HEAD;
        }
# ifndef CLIENT_SIDELINE
        LOG(THREAD, LOG_MONITOR, 2,
            "Client mark trace head : will mark fragment as trace head when built "
            ": address " PFX "\n",
            tag);
# endif
    } else {
        /* check precluding conditions */
        if (TEST(FRAG_IS_TRACE, f->flags)) {
# ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : not marking as trace head, is already "
                "a trace : address " PFX "\n",
                tag);
# endif
            success = false;
        } else if (TEST(FRAG_CANNOT_BE_TRACE, f->flags)) {
# ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : not marking as trace head, particular "
                "fragment cannot be trace head : address " PFX "\n",
                tag);
# endif
            success = false;
        } else if (TEST(FRAG_IS_TRACE_HEAD, f->flags)) {
# ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 2,
                "Client mark trace head : fragment already marked as trace head : "
                "address " PFX "\n",
                tag);
# endif
            success = true;
        } else {
            mark_trace_head(dcontext, f, NULL, NULL);
# ifndef CLIENT_SIDELINE
            LOG(THREAD, LOG_MONITOR, 3,
                "Client mark trace head : just marked as trace head : address " PFX "\n",
                tag);
# endif
        }
    }
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
# endif
    SHARED_FLAGS_RECURSIVE_LOCK(FRAG_SHARED, release, change_linking_lock);
    return success;
}

DR_API
/* Checks to see if the fragment (or future fragment) in the drcontext
 * fcache at tag is marked as a trace head.
 */
bool
dr_trace_head_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    bool trace_head;
# ifdef CLIENT_SIDELINE
    fragment_get_fragment_delete_mutex(dcontext);
# endif
    f = fragment_lookup(dcontext, tag);
    if (f != NULL)
        trace_head = (f->flags & FRAG_IS_TRACE_HEAD) != 0;
    else {
        future_fragment_t *fut = fragment_lookup_future(dcontext, tag);
        if (fut != NULL)
            trace_head = (fut->flags & FRAG_IS_TRACE_HEAD) != 0;
        else
            trace_head = false;
    }
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
# endif
    return trace_head;
}

DR_API
/* checks to see if there is a trace in the drcontext fcache at tag */
bool
dr_trace_exists_at(void *drcontext, void *tag)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    fragment_t *f;
    bool trace;
# ifdef CLIENT_SIDELINE
    fragment_get_fragment_delete_mutex(dcontext);
# endif
    f = fragment_lookup(dcontext, tag);
    if (f != NULL)
        trace = (f->flags & FRAG_IS_TRACE) != 0;
    else
        trace = false;
# ifdef CLIENT_SIDELINE
    fragment_release_fragment_delete_mutex(dcontext);
# endif
    return trace;
}

# ifdef UNSUPPORTED_API
DR_API
/* All basic blocks created after this routine is called will have a prefix
 * that restores the ecx register.  Exit ctis can be made to target this prefix
 * instead of the normal entry point by using the instr_branch_set_prefix_target()
 * routine.
 * WARNING: this routine should almost always be called during client
 * initialization, since having a mixture of prefixed and non-prefixed basic
 * blocks can lead to trouble.
 */
void
dr_add_prefixes_to_basic_blocks(void)
{
    if (DYNAMO_OPTION(coarse_units)) {
        /* coarse_units doesn't support prefixes in general.
         * the variation by addr prefix according to processor type
         * is also not stored in pcaches.
         */
        CLIENT_ASSERT(false,
                      "dr_add_prefixes_to_basic_blocks() not supported with -opt_memory");
    }
    options_make_writable();
    dynamo_options.bb_prefixes = true;
    options_restore_readonly();
}
# endif /* UNSUPPORTED_API */

DR_API
/* Insert code to get the segment base address pointed at by seg into
 * register reg.  On Linux, this is only supported with the -mangle_app_seg
 * option.  On Windows, it only supports getting the base address of the
 * TLS segment.
 */
bool
dr_insert_get_seg_base(void *drcontext, instrlist_t *ilist, instr_t *instr, reg_id_t seg,
                       reg_id_t reg)
{
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_get_seg_base: reg has wrong size\n");
# ifdef X86
    CLIENT_ASSERT(reg_is_segment(seg),
                  "dr_insert_get_seg_base: seg is not a segment register");
# ifdef UNIX
    CLIENT_ASSERT(INTERNAL_OPTION(mangle_app_seg),
                  "dr_insert_get_seg_base is supported "
                  "with -mangle_app_seg only");
    /* FIXME: we should remove the constraint below by always mangling SEG_TLS,
     * 1. Getting the TLS base could be a common request by clients.
     * 2. The TLS descriptor setup and selector setup can be separated,
     * so we must intercept all descriptor setup.  It will not be large
     * runtime overhead for keeping track of the app's TLS segment base.
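     *
     * Usage sketch (hypothetical client code): load the fs base into a
     * scratch register ahead of an app segment reference:
     *   dr_insert_get_seg_base(drcontext, bb, instr, SEG_FS, DR_REG_XCX);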
     */
    CLIENT_ASSERT(INTERNAL_OPTION(private_loader) || seg != SEG_TLS,
                  "dr_insert_get_seg_base supports the TLS seg "
                  "only with -private_loader");
    if (!INTERNAL_OPTION(mangle_app_seg) ||
        !(INTERNAL_OPTION(private_loader) || seg != SEG_TLS))
        return false;
    if (seg == SEG_FS || seg == SEG_GS) {
        instrlist_meta_preinsert(ilist, instr,
                                 instr_create_restore_from_tls(
                                     drcontext, reg, os_get_app_tls_base_offset(seg)));
    } else {
        instrlist_meta_preinsert(
            ilist, instr,
            INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
                                 OPND_CREATE_INTPTR(0)));
    }
# else /* Windows */
    if (seg == SEG_TLS) {
        instrlist_meta_preinsert(
            ilist, instr,
            XINST_CREATE_load(drcontext, opnd_create_reg(reg),
                              opnd_create_far_base_disp(SEG_TLS, REG_NULL, REG_NULL, 0,
                                                        SELF_TIB_OFFSET, OPSZ_PTR)));
    } else if (seg == SEG_CS || seg == SEG_DS || seg == SEG_ES || seg == SEG_SS) {
        /* XXX: we assume a flat address space */
        instrlist_meta_preinsert(
            ilist, instr,
            INSTR_CREATE_mov_imm(drcontext, opnd_create_reg(reg),
                                 OPND_CREATE_INTPTR(0)));
    } else
        return false;
# endif /* UNIX/Windows */
# elif defined(ARM)
    /* i#1551: NYI on ARM */
    ASSERT_NOT_IMPLEMENTED(false);
# endif /* X86/ARM */
    return true;
}

DR_API
reg_id_t
dr_get_stolen_reg()
{
    return IF_X86_ELSE(REG_NULL, dr_reg_stolen);
}

DR_API
bool
dr_insert_get_stolen_reg_value(void *drcontext, instrlist_t *ilist, instr_t *instr,
                               reg_id_t reg)
{
    IF_X86(CLIENT_ASSERT(false, "dr_insert_get_stolen_reg: should not be reached\n"));
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_get_stolen_reg: reg has wrong size\n");
    CLIENT_ASSERT(!reg_is_stolen(reg),
                  "dr_insert_get_stolen_reg: reg is used by DynamoRIO\n");
# ifdef AARCHXX
    instrlist_meta_preinsert(
        ilist, instr, instr_create_restore_from_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
# endif
    return true;
}

DR_API
bool
dr_insert_set_stolen_reg_value(void *drcontext, instrlist_t *ilist, instr_t *instr,
                               reg_id_t reg)
{
    IF_X86(CLIENT_ASSERT(false, "dr_insert_set_stolen_reg: should not be reached\n"));
    CLIENT_ASSERT(reg_is_pointer_sized(reg),
                  "dr_insert_set_stolen_reg: reg has wrong size\n");
    CLIENT_ASSERT(!reg_is_stolen(reg),
                  "dr_insert_set_stolen_reg: reg is used by DynamoRIO\n");
# ifdef AARCHXX
    instrlist_meta_preinsert(
        ilist, instr, instr_create_save_to_tls(drcontext, reg, TLS_REG_STOLEN_SLOT));
# endif
    return true;
}

DR_API
int
dr_remove_it_instrs(void *drcontext, instrlist_t *ilist)
{
# if !defined(ARM)
    return 0;
# else
    int res = 0;
    instr_t *inst, *next;
    for (inst = instrlist_first(ilist); inst != NULL; inst = next) {
        next = instr_get_next(inst);
        if (instr_get_opcode(inst) == OP_it) {
            res++;
            instrlist_remove(ilist, inst);
            instr_destroy(drcontext, inst);
        }
    }
    return res;
# endif
}

DR_API
int
dr_insert_it_instrs(void *drcontext, instrlist_t *ilist)
{
# if !defined(ARM)
    return 0;
# else
    instr_t *first = instrlist_first(ilist);
    if (first == NULL || instr_get_isa_mode(first) != DR_ISA_ARM_THUMB)
        return 0;
    return reinstate_it_blocks((dcontext_t *)drcontext, ilist, instrlist_first(ilist),
                               NULL);
# endif
}

DR_API
bool
dr_prepopulate_cache(app_pc *tags, size_t tags_count)
{
    /* We expect get_thread_private_dcontext() to return NULL b/c we're between
     * dr_app_setup() and dr_app_start() and are considered a "native" thread
     * with disabled TLS.  We do set up TLS as too many routines fail (e.g.,
     * clean call analysis) with NULL from TLS, but we do not set up signal
     * handling: the caller has to handle decode faults, as we do not
     * want to enable our signal handlers, which might disrupt the app running
     * natively in parallel with us.
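     *
     * Intended flow (a hypothetical caller sketch; tags come from a prior
     * profiling run):
     *   dr_app_setup();
     *   dr_prepopulate_cache(tags, num_tags);
     *   dr_app_start();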
*/ thread_record_t *tr = thread_lookup(d_r_get_thread_id()); dcontext_t *dcontext = tr->dcontext; uint i; if (dcontext == NULL) return false; SHARED_BB_LOCK(); SYSLOG_INTERNAL_INFO("pre-building code cache from %d tags", tags_count); # ifdef UNIX os_swap_context(dcontext, false /*to dr*/, DR_STATE_GO_NATIVE); # endif for (i = 0; i < tags_count; i++) { /* There could be duplicates if sthg was deleted and re-added during profiling */ fragment_t coarse_f; fragment_t *f; # ifdef UNIX /* We silently skip DR-segment-reading addresses to help out a caller * who sampled and couldn't avoid self-sampling for decoding. */ if (is_DR_segment_reader_entry(tags[i])) continue; # endif f = fragment_lookup_fine_and_coarse(dcontext, tags[i], &coarse_f, NULL); if (f == NULL) { /* For coarse-grain we won't link as that's done during execution, * but for fine-grained this should produce a fully warmed cache. */ f = build_basic_block_fragment(dcontext, tags[i], 0, true /*link*/, true /*visible*/ _IF_CLIENT(false /*!for_trace*/) _IF_CLIENT(NULL)); } ASSERT(f != NULL); /* We're ok making a thread-private fragment: might be a waste if this * thread never runs it, but simpler than trying to skip them or sthg. */ } # ifdef UNIX os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE); # endif SHARED_BB_UNLOCK(); return true; } DR_API bool dr_prepopulate_indirect_targets(dr_indirect_branch_type_t branch_type, app_pc *tags, size_t tags_count) { /* We do the same setup as for dr_prepopulate_cache(). */ thread_record_t *tr = thread_lookup(d_r_get_thread_id()); dcontext_t *dcontext = tr->dcontext; ibl_branch_type_t ibl_type; uint i; if (dcontext == NULL) return false; /* Initially I took in an opcode and used extract_branchtype(instr_branch_type()) * but every use case had to make a fake instr to get the opcode and had no * good cross-platform method so I switched to an enum. We're unlikely to * change our ibt split and we can add new enums in any case. */ switch (branch_type) { case DR_INDIRECT_RETURN: ibl_type = IBL_RETURN; break; case DR_INDIRECT_CALL: ibl_type = IBL_INDCALL; break; case DR_INDIRECT_JUMP: ibl_type = IBL_INDJMP; break; default: return false; } SYSLOG_INTERNAL_INFO("pre-populating ibt[%d] table for %d tags", ibl_type, tags_count); # ifdef UNIX os_swap_context(dcontext, false /*to dr*/, DR_STATE_GO_NATIVE); # endif for (i = 0; i < tags_count; i++) { fragment_add_ibl_target(dcontext, tags[i], ibl_type); } # ifdef UNIX os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE); # endif return true; } DR_API bool dr_get_stats(dr_stats_t *drstats) { return stats_get_snapshot(drstats); } /*************************************************************************** * PERSISTENCE */ /* Up to caller to synchronize. */ uint instrument_persist_ro_size(dcontext_t *dcontext, void *perscxt, size_t file_offs) { size_t sz = 0; size_t i; /* Store the set of clients in use as we require the same set in order * to validate the pcache on use. Note that we can't just have -client_lib * be OP_PCACHE_GLOBAL b/c it contains client options too. * We have no unique guids for clients so we store the full path. * We ignore ids. We do care about priority order: clients must * be in the same order in addition to having the same path. * * XXX: we could go further and store client library checksum, etc. hashes, * but that precludes clients from doing their own proper versioning. 
* * XXX: we could also put the set of clients into the pcache namespace to allow * simultaneous use of pcaches with different sets of clients (empty set * vs under tool, in particular): but doesn't really seem useful enough * for the trouble */ for (i = 0; i < num_client_libs; i++) { sz += strlen(client_libs[i].path) + 1 /*NULL*/; } sz++; /* double NULL ends it */ /* Now for clients' own data. * For user_data, we assume each sequence of <size, patch, persist> is * atomic: caller holds a mutex across the sequence. Thus, we can use * global storage. */ if (persist_ro_size_callbacks.num > 0) { call_all_ret(sz, +=, , persist_ro_size_callbacks, size_t(*)(void *, void *, size_t, void **), (void *)dcontext, perscxt, file_offs + sz, &persist_user_data[idx]); } /* using size_t for API w/ clients in case we want to widen in future */ CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large"); return (uint)sz; } /* Up to caller to synchronize. * Returns true iff all writes succeeded. */ bool instrument_persist_ro(dcontext_t *dcontext, void *perscxt, file_t fd) { bool res = true; size_t i; char nul = '\0'; ASSERT(fd != INVALID_FILE); for (i = 0; i < num_client_libs; i++) { size_t sz = strlen(client_libs[i].path) + 1 /*NULL*/; if (os_write(fd, client_libs[i].path, sz) != (ssize_t)sz) return false; } /* double NULL ends it */ if (os_write(fd, &nul, sizeof(nul)) != (ssize_t)sizeof(nul)) return false; /* Now for clients' own data */ if (persist_ro_size_callbacks.num > 0) { call_all_ret(res, = res &&, , persist_ro_callbacks, bool (*)(void *, void *, file_t, void *), (void *)dcontext, perscxt, fd, persist_user_data[idx]); } return res; } /* Returns true if successfully validated and de-serialized */ bool instrument_resurrect_ro(dcontext_t *dcontext, void *perscxt, byte *map) { bool res = true; size_t i; const char *c; ASSERT(map != NULL); /* Ensure we have the same set of tools (see comments above) */ i = 0; c = (const char *)map; while (*c != '\0') { if (i >= num_client_libs) return false; /* too many clients */ if (strcmp(client_libs[i].path, c) != 0) return false; /* client path mismatch */ c += strlen(c) + 1; i++; } if (i < num_client_libs) return false; /* too few clients */ c++; /* Now for clients' own data */ if (resurrect_ro_callbacks.num > 0) { call_all_ret(res, = res &&, , resurrect_ro_callbacks, bool (*)(void *, void *, byte **), (void *)dcontext, perscxt, (byte **)&c); } return res; } /* Up to caller to synchronize. */ uint instrument_persist_rx_size(dcontext_t *dcontext, void *perscxt, size_t file_offs) { size_t sz = 0; if (persist_rx_size_callbacks.num == 0) return 0; call_all_ret(sz, +=, , persist_rx_size_callbacks, size_t(*)(void *, void *, size_t, void **), (void *)dcontext, perscxt, file_offs + sz, &persist_user_data[idx]); /* using size_t for API w/ clients in case we want to widen in future */ CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large"); return (uint)sz; } /* Up to caller to synchronize. * Returns true iff all writes succeeded. 
*/ bool instrument_persist_rx(dcontext_t *dcontext, void *perscxt, file_t fd) { bool res = true; ASSERT(fd != INVALID_FILE); if (persist_rx_callbacks.num == 0) return true; call_all_ret(res, = res &&, , persist_rx_callbacks, bool (*)(void *, void *, file_t, void *), (void *)dcontext, perscxt, fd, persist_user_data[idx]); return res; } /* Returns true if successfully validated and de-serialized */ bool instrument_resurrect_rx(dcontext_t *dcontext, void *perscxt, byte *map) { bool res = true; ASSERT(map != NULL); if (resurrect_rx_callbacks.num == 0) return true; call_all_ret(res, = res &&, , resurrect_rx_callbacks, bool (*)(void *, void *, byte **), (void *)dcontext, perscxt, &map); return res; } /* Up to caller to synchronize. */ uint instrument_persist_rw_size(dcontext_t *dcontext, void *perscxt, size_t file_offs) { size_t sz = 0; if (persist_rw_size_callbacks.num == 0) return 0; call_all_ret(sz, +=, , persist_rw_size_callbacks, size_t(*)(void *, void *, size_t, void **), (void *)dcontext, perscxt, file_offs + sz, &persist_user_data[idx]); /* using size_t for API w/ clients in case we want to widen in future */ CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(sz), "persisted cache size too large"); return (uint)sz; } /* Up to caller to synchronize. * Returns true iff all writes succeeded. */ bool instrument_persist_rw(dcontext_t *dcontext, void *perscxt, file_t fd) { bool res = true; ASSERT(fd != INVALID_FILE); if (persist_rw_callbacks.num == 0) return true; call_all_ret(res, = res &&, , persist_rw_callbacks, bool (*)(void *, void *, file_t, void *), (void *)dcontext, perscxt, fd, persist_user_data[idx]); return res; } /* Returns true if successfully validated and de-serialized */ bool instrument_resurrect_rw(dcontext_t *dcontext, void *perscxt, byte *map) { bool res = true; ASSERT(map != NULL); if (resurrect_rw_callbacks.num == 0) return true; call_all_ret(res, = res &&, , resurrect_rx_callbacks, bool (*)(void *, void *, byte **), (void *)dcontext, perscxt, &map); return res; } bool instrument_persist_patch(dcontext_t *dcontext, void *perscxt, byte *bb_start, size_t bb_size) { bool res = true; if (persist_patch_callbacks.num == 0) return true; call_all_ret(res, = res &&, , persist_patch_callbacks, bool (*)(void *, void *, byte *, size_t, void *), (void *)dcontext, perscxt, bb_start, bb_size, persist_user_data[idx]); return res; } DR_API bool dr_register_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { if (func_size == NULL || func_persist == NULL || func_resurrect == NULL) return false; add_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true); add_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true); add_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect, true); return true; } DR_API bool dr_unregister_persist_ro(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { bool res = true; if (func_size != NULL) { res = remove_callback(&persist_ro_size_callbacks, (void (*)(void))func_size, true) && res; } else res = false; if (func_persist != NULL) { res = remove_callback(&persist_ro_callbacks, (void (*)(void))func_persist, true) && res; } else res = false; 
if (func_resurrect != NULL) { res = remove_callback(&resurrect_ro_callbacks, (void (*)(void))func_resurrect, true) && res; } else res = false; return res; } DR_API bool dr_register_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { if (func_size == NULL || func_persist == NULL || func_resurrect == NULL) return false; add_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true); add_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true); add_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect, true); return true; } DR_API bool dr_unregister_persist_rx(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { bool res = true; if (func_size != NULL) { res = remove_callback(&persist_rx_size_callbacks, (void (*)(void))func_size, true) && res; } else res = false; if (func_persist != NULL) { res = remove_callback(&persist_rx_callbacks, (void (*)(void))func_persist, true) && res; } else res = false; if (func_resurrect != NULL) { res = remove_callback(&resurrect_rx_callbacks, (void (*)(void))func_resurrect, true) && res; } else res = false; return res; } DR_API bool dr_register_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { if (func_size == NULL || func_persist == NULL || func_resurrect == NULL) return false; add_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true); add_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true); add_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect, true); return true; } DR_API bool dr_unregister_persist_rw(size_t (*func_size)(void *drcontext, void *perscxt, size_t file_offs, void **user_data OUT), bool (*func_persist)(void *drcontext, void *perscxt, file_t fd, void *user_data), bool (*func_resurrect)(void *drcontext, void *perscxt, byte **map INOUT)) { bool res = true; if (func_size != NULL) { res = remove_callback(&persist_rw_size_callbacks, (void (*)(void))func_size, true) && res; } else res = false; if (func_persist != NULL) { res = remove_callback(&persist_rw_callbacks, (void (*)(void))func_persist, true) && res; } else res = false; if (func_resurrect != NULL) { res = remove_callback(&resurrect_rw_callbacks, (void (*)(void))func_resurrect, true) && res; } else res = false; return res; } DR_API bool dr_register_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt, byte *bb_start, size_t bb_size, void *user_data)) { if (func_patch == NULL) return false; add_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true); return true; } DR_API bool dr_unregister_persist_patch(bool (*func_patch)(void *drcontext, void *perscxt, byte *bb_start, size_t bb_size, void *user_data)) { return remove_callback(&persist_patch_callbacks, (void (*)(void))func_patch, true); } #endif /* CLIENT_INTERFACE */
1
17684
I would just remove this assert, as it's going to become unmaintainable with a long list of valid sizes. Ditto below.
DynamoRIO-dynamorio
c
@@ -28,6 +28,7 @@ func DefaultConfig() Config { ReservedPorts: []uint16{SSHPort, DockerReservedPort, DockerReservedSSLPort, AgentIntrospectionPort, AgentCredentialsPort}, ReservedPortsUDP: []uint16{}, DataDir: "/data/", + HostDataDir: "/var/lib/ecs/", DisableMetrics: false, ReservedMemory: 0, AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JSONFileDriver},
1
// +build !windows // Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import "github.com/aws/amazon-ecs-agent/agent/engine/dockerclient" const ( // defaultAuditLogFile specifies the default audit log filename defaultCredentialsAuditLogFile = "/log/audit.log" ) // DefaultConfig returns the default configuration for Linux func DefaultConfig() Config { return Config{ DockerEndpoint: "unix:///var/run/docker.sock", ReservedPorts: []uint16{SSHPort, DockerReservedPort, DockerReservedSSLPort, AgentIntrospectionPort, AgentCredentialsPort}, ReservedPortsUDP: []uint16{}, DataDir: "/data/", DisableMetrics: false, ReservedMemory: 0, AvailableLoggingDrivers: []dockerclient.LoggingDriver{dockerclient.JSONFileDriver}, TaskCleanupWaitDuration: DefaultTaskCleanupWaitDuration, DockerStopTimeout: DefaultDockerStopTimeout, CredentialsAuditLogFile: defaultCredentialsAuditLogFile, CredentialsAuditLogDisabled: false, ImageCleanupDisabled: false, MinimumImageDeletionAge: DefaultImageDeletionAge, ImageCleanupInterval: DefaultImageCleanupTimeInterval, NumImagesToDeletePerCycle: DefaultNumImagesToDeletePerCycle, } } func (config *Config) platformOverrides() {}
1
15637
`HostDataDir` is misleading. Can we rename it to something more relevant? The constant should also be moved up and reused as necessary (see the sketch after this record).
aws-amazon-ecs-agent
go
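A minimal sketch of the refactor the review comment above asks for, in Go. The name defaultDataDirOnHost and the trimmed Config struct are stand-ins of mine: the reviewer did not propose a concrete name, and the agent's real Config has many more fields.

package config

// defaultDataDirOnHost is a hypothetical rename of HostDataDir: a constant
// declared once near the top of the file so that every call site reuses it
// instead of repeating the "/var/lib/ecs/" literal.
const defaultDataDirOnHost = "/var/lib/ecs/"

// Config is a trimmed stand-in for the agent's real Config struct.
type Config struct {
	DataDir       string
	DataDirOnHost string
}

// DefaultConfig reuses the constant rather than an inline literal.
func DefaultConfig() Config {
	return Config{
		DataDir:       "/data/",
		DataDirOnHost: defaultDataDirOnHost,
	}
}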
@@ -37,6 +37,9 @@ const ( defaultMTUGRE = 1462 defaultMTUSTT = 1500 defaultMTU = 1500 + // IPsec ESP can add a maximum of 38 bytes to the packet including the ESP + // header and trailer. + ipsecESPOverhead = 38 ) type Options struct {
1
// Copyright 2019 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "io/ioutil" "net" "github.com/spf13/pflag" "gopkg.in/yaml.v2" "github.com/vmware-tanzu/antrea/pkg/agent/config" "github.com/vmware-tanzu/antrea/pkg/cni" "github.com/vmware-tanzu/antrea/pkg/ovs/ovsconfig" ) const ( defaultOVSBridge = "br-int" defaultHostGateway = "gw0" defaultHostProcPathPrefix = "/host" defaultServiceCIDR = "10.96.0.0/12" defaultMTUVXLAN = 1450 defaultMTUGeneve = 1450 defaultMTUGRE = 1462 defaultMTUSTT = 1500 defaultMTU = 1500 ) type Options struct { // The path of configuration file. configFile string // The configuration object config *AgentConfig } func newOptions() *Options { return &Options{ config: new(AgentConfig), } } // addFlags adds flags to fs and binds them to options. func (o *Options) addFlags(fs *pflag.FlagSet) { fs.StringVar(&o.configFile, "config", o.configFile, "The path to the configuration file") } // complete completes all the required options. func (o *Options) complete(args []string) error { if len(o.configFile) > 0 { c, err := o.loadConfigFromFile(o.configFile) if err != nil { return err } o.config = c } o.setDefaults() return nil } // validate validates all the required options. It must be called after complete. 
func (o *Options) validate(args []string) error { if len(args) != 0 { return fmt.Errorf("an empty argument list is not supported") } // Validate service CIDR configuration _, _, err := net.ParseCIDR(o.config.ServiceCIDR) if err != nil { return fmt.Errorf("service CIDR %s is invalid", o.config.ServiceCIDR) } if o.config.TunnelType != ovsconfig.VXLANTunnel && o.config.TunnelType != ovsconfig.GeneveTunnel && o.config.TunnelType != ovsconfig.GRETunnel && o.config.TunnelType != ovsconfig.STTTunnel { return fmt.Errorf("tunnel type %s is invalid", o.config.TunnelType) } if o.config.EnableIPSecTunnel && o.config.TunnelType != ovsconfig.GRETunnel { return fmt.Errorf("IPSec encyption is supported only for GRE tunnel") } if o.config.OVSDatapathType != ovsconfig.OVSDatapathSystem && o.config.OVSDatapathType != ovsconfig.OVSDatapathNetdev { return fmt.Errorf("OVS datapath type %s is not supported", o.config.OVSDatapathType) } ok, encapMode := config.GetTrafficEncapModeFromStr(o.config.TrafficEncapMode) if !ok { return fmt.Errorf("TrafficEncapMode %s is unknown", o.config.TrafficEncapMode) } if encapMode.SupportsNoEncap() && o.config.EnableIPSecTunnel { return fmt.Errorf("IPSec tunnel may only be enabled on %s mode", config.TrafficEncapModeEncap) } return nil } func (o *Options) loadConfigFromFile(file string) (*AgentConfig, error) { data, err := ioutil.ReadFile(file) if err != nil { return nil, err } var c AgentConfig err = yaml.UnmarshalStrict(data, &c) if err != nil { return nil, err } return &c, nil } func (o *Options) setDefaults() { if o.config.CNISocket == "" { o.config.CNISocket = cni.AntreaCNISocketAddr } if o.config.OVSBridge == "" { o.config.OVSBridge = defaultOVSBridge } if o.config.OVSDatapathType == "" { o.config.OVSDatapathType = ovsconfig.OVSDatapathSystem } if o.config.HostGateway == "" { o.config.HostGateway = defaultHostGateway } if o.config.TunnelType == "" { o.config.TunnelType = ovsconfig.VXLANTunnel } if o.config.HostProcPathPrefix == "" { o.config.HostProcPathPrefix = defaultHostProcPathPrefix } if o.config.ServiceCIDR == "" { o.config.ServiceCIDR = defaultServiceCIDR } if o.config.TrafficEncapMode == "" { o.config.TrafficEncapMode = config.TrafficEncapModeEncap.String() } if o.config.DefaultMTU == 0 { ok, encapMode := config.GetTrafficEncapModeFromStr(o.config.TrafficEncapMode) if ok && !encapMode.SupportsEncap() { o.config.DefaultMTU = defaultMTU } else if o.config.TunnelType == ovsconfig.VXLANTunnel { o.config.DefaultMTU = defaultMTUVXLAN } else if o.config.TunnelType == ovsconfig.GeneveTunnel { o.config.DefaultMTU = defaultMTUGeneve } else if o.config.TunnelType == ovsconfig.GRETunnel { o.config.DefaultMTU = defaultMTUGRE } else if o.config.TunnelType == ovsconfig.STTTunnel { o.config.DefaultMTU = defaultMTUSTT } } }
1
13177
I still feel like we are double-counting the outer IP header here (once in `defaultMTUGRE` and once in `ipsecESPOverhead`), but I'm not that familiar with IPsec. The numbers are worked through after this record.
antrea-io-antrea
go
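To make the double-counting question concrete, here is the arithmetic as a runnable Go sketch. Only defaultMTUGRE and ipsecESPOverhead come from the record; the assumption that the ESP overhead is subtracted from the GRE default is mine, since the hunk shown does not include the call site.

package main

import "fmt"

func main() {
	const (
		ethernetMTU      = 1500 // defaultMTU in the record
		defaultMTUGRE    = 1462 // GRE default from the record
		ipsecESPOverhead = 38   // per the patch comment: ESP header + trailer, at most
	)
	// The GRE default already reserves 1500 - 1462 = 38 bytes for
	// encapsulation, a budget that presumably covers the outer IP header.
	fmt.Println(ethernetMTU - defaultMTUGRE) // 38
	// If the 38-byte ESP figure also contained a 20-byte outer IP header,
	// subtracting it on top of the GRE default would count that header twice.
	// The patch comment says it covers only the ESP header and trailer, which
	// would mean no double count - and that is exactly the reviewer's open
	// question.
	fmt.Println(defaultMTUGRE - ipsecESPOverhead) // 1424
}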
@@ -14,11 +14,15 @@ // limitations under the License. // </copyright> -#nullable enable +using System; +using System.Collections.Generic; +using System.Diagnostics.Metrics; namespace OpenTelemetry.Metrics { public abstract class MeasurementProcessor : BaseProcessor<MeasurementItem> { + public abstract void OnEnd<T>(MeasurementItem measurementItem, ref DateTimeOffset dt, ref T value, ref ReadOnlySpan<KeyValuePair<string, object>> tags) + where T : struct; } }
1
// <copyright file="MeasurementProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> #nullable enable namespace OpenTelemetry.Metrics { public abstract class MeasurementProcessor : BaseProcessor<MeasurementItem> { } }
1
20223
Not sure what benefit we gain by extending BaseProcessor here, as this seems to be defining a new OnEnd method. Could we modify MeasurementItem to carry all of that data, so that MeasurementProcessor can simply extend BaseProcessor<MeasurementItem>? (Not blocking, just noting some observations in the PR. :)) A sketch of that shape follows this record.
open-telemetry-opentelemetry-dotnet
.cs
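The shape the review comment above is proposing, sketched in Go for consistency with the other examples in this dump; the real code is C#, and every name below is a stand-in rather than the actual OpenTelemetry .NET API.

package metrics

import "time"

// MeasurementItem carries all of the per-measurement data (timestamp, value,
// tags), so the processor does not need a separate generic OnEnd hook.
type MeasurementItem struct {
	Timestamp time.Time
	Value     float64
	Tags      map[string]string
}

// BaseProcessor mirrors the idea of BaseProcessor<T>: one OnEnd per item.
type BaseProcessor[T any] interface {
	OnEnd(item T)
}

// MeasurementProcessor then adds nothing of its own; it is just the base
// processor specialized to MeasurementItem.
type MeasurementProcessor interface {
	BaseProcessor[MeasurementItem]
}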
@@ -3,6 +3,13 @@ package cmd import ( "bufio" "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil"
1
package cmd import ( "bufio" "fmt" "github.com/drud/ddev/pkg/ddevapp" "github.com/drud/ddev/pkg/exec" "github.com/drud/ddev/pkg/fileutil" "github.com/drud/ddev/pkg/globalconfig" "github.com/drud/ddev/pkg/util" "github.com/gobuffalo/packr/v2" "github.com/mattn/go-isatty" "github.com/spf13/cobra" "io/ioutil" "os" "path" "path/filepath" "runtime" "strings" ) // addCustomCommands looks for custom command scripts in // ~/.ddev/commands/<servicename> etc. and // .ddev/commands/<servicename> and .ddev/commands/host // and if it finds them adds them to Cobra's commands. func addCustomCommands(rootCmd *cobra.Command) error { app, err := ddevapp.GetActiveApp("") if err != nil { return nil } sourceGlobalCommandPath := filepath.Join(globalconfig.GetGlobalDdevDir(), "commands") err = os.MkdirAll(sourceGlobalCommandPath, 0755) if err != nil { return nil } projectCommandPath := app.GetConfigPath("commands") // Make sure our target global command directory is empty targetGlobalCommandPath := app.GetConfigPath(".global_commands") _ = os.RemoveAll(targetGlobalCommandPath) err = fileutil.CopyDir(sourceGlobalCommandPath, targetGlobalCommandPath) if err != nil { return err } if !fileutil.FileExists(projectCommandPath) || !fileutil.IsDirectory(projectCommandPath) { return nil } commandsAdded := map[string]int{} for _, commandSet := range []string{projectCommandPath, targetGlobalCommandPath} { commandDirs, err := fileutil.ListFilesInDirFullPath(commandSet) if err != nil { return err } for _, serviceDirOnHost := range commandDirs { service := filepath.Base(serviceDirOnHost) // If the item isn't actually a directory, just skip it. if !fileutil.IsDirectory(serviceDirOnHost) { continue } commandFiles, err := fileutil.ListFilesInDir(serviceDirOnHost) if err != nil { return err } if runtime.GOOS == "windows" { windowsBashPath := util.FindWindowsBashPath() if windowsBashPath == "" { fmt.Println("Unable to find bash.exe in PATH, not loading custom commands") return nil } } for _, commandName := range commandFiles { // Use path.Join() for the inContainerFullPath because it'serviceDirOnHost about the path in the container, not on the // host; a Windows path is not useful here. inContainerFullPath := path.Join("/mnt/ddev_config", filepath.Base(commandSet), service, commandName) onHostFullPath := filepath.Join(commandSet, service, commandName) if strings.HasSuffix(commandName, ".example") || strings.HasPrefix(commandName, "README") || strings.HasPrefix(commandName, ".") || fileutil.IsDirectory(onHostFullPath) { continue } // If command has already been added, we won't work with it again. 
if _, ok := commandsAdded[commandName]; ok { util.Warning("not adding command %s (%s) because it was already added to project %s", commandName, onHostFullPath, app.Name) continue } // Any command we find will want to be executable on Linux _ = os.Chmod(onHostFullPath, 0755) if hasCR, _ := fileutil.FgrepStringInFile(onHostFullPath, "\r\n"); hasCR { util.Warning("command '%s' contains CRLF, please convert to Linux-style linefeeds with dos2unix or another tool, skipping %s", commandName, onHostFullPath) continue } directives := findDirectivesInScriptCommand(onHostFullPath) var description, usage, example, projectTypes, osTypes, hostBinaryExists string description = commandName if val, ok := directives["Description"]; ok { description = val } if val, ok := directives["Usage"]; ok { usage = val } if val, ok := directives["Example"]; ok { example = " " + strings.ReplaceAll(val, `\n`, "\n ") } if val, ok := directives["ProjectTypes"]; ok { projectTypes = val } // If ProjectTypes is specified and we aren't of that type, skip if projectTypes != "" && !strings.Contains(projectTypes, app.Type) { continue } if val, ok := directives["OSTypes"]; ok { osTypes = val } // If OSTypes is specified and we aren't this isn't a specified OS, skip if osTypes != "" && !strings.Contains(osTypes, runtime.GOOS) { continue } if val, ok := directives["HostBinaryExists"]; ok { hostBinaryExists = val } // If hostBinaryExists is specified it doesn't exist here, skip if hostBinaryExists != "" && !fileutil.FileExists(hostBinaryExists) { continue } descSuffix := " (shell " + service + " container command)" if commandSet == targetGlobalCommandPath { descSuffix = " (global shell " + service + " container command)" } commandToAdd := &cobra.Command{ Use: usage, Short: description + descSuffix, Example: example, FParseErrWhitelist: cobra.FParseErrWhitelist{ UnknownFlags: true, }, } if service == "host" { commandToAdd.Run = makeHostCmd(app, onHostFullPath, commandName) } else { commandToAdd.Run = makeContainerCmd(app, inContainerFullPath, commandName, service) } rootCmd.AddCommand(commandToAdd) commandsAdded[commandName] = 1 } } } return nil } // makeHostCmd creates a command which will run on the host func makeHostCmd(app *ddevapp.DdevApp, fullPath, name string) func(*cobra.Command, []string) { var windowsBashPath = "" if runtime.GOOS == "windows" { windowsBashPath = util.FindWindowsBashPath() } return func(cmd *cobra.Command, cobraArgs []string) { if app.SiteStatus() != ddevapp.SiteRunning { err := app.Start() if err != nil { util.Failed("Failed to start project for custom command: %v", err) } } app.DockerEnv() osArgs := []string{} if len(os.Args) > 2 { osArgs = os.Args[2:] } var err error // Load environment variables that may be useful for script. app.DockerEnv() if runtime.GOOS == "windows" { // Sadly, not sure how to have a bash interpreter without this. args := []string{fullPath} args = append(args, osArgs...) err = exec.RunInteractiveCommand(windowsBashPath, args) } else { err = exec.RunInteractiveCommand(fullPath, osArgs) } if err != nil { util.Failed("Failed to run %s %v; error=%v", name, strings.Join(osArgs, " "), err) } } } // makeContainerCmd creates the command which will app.Exec to a container command func makeContainerCmd(app *ddevapp.DdevApp, fullPath, name string, service string) func(*cobra.Command, []string) { s := service if s[0:1] == "." 
{ s = s[1:] } return func(cmd *cobra.Command, args []string) { if app.SiteStatus() != ddevapp.SiteRunning { err := app.Start() if err != nil { util.Failed("Failed to start project for custom command: %v", err) } } app.DockerEnv() osArgs := []string{} if len(os.Args) > 2 { osArgs = os.Args[2:] } _, _, err := app.Exec(&ddevapp.ExecOpts{ Cmd: fullPath + " " + strings.Join(osArgs, " "), Service: s, Dir: app.GetWorkingDir(s, ""), Tty: isatty.IsTerminal(os.Stdin.Fd()), NoCapture: true, }) if err != nil { util.Failed("Failed to run %s %v: %v", name, strings.Join(osArgs, " "), err) } } } // findDirectivesInScriptCommand() Returns a map of directives and their contents // found in the named script func findDirectivesInScriptCommand(script string) map[string]string { f, err := os.Open(script) if err != nil { util.Failed("Failed to open %s: %v", script, err) } // nolint errcheck defer f.Close() var directives = make(map[string]string) // Splits on newlines by default. scanner := bufio.NewScanner(f) for scanner.Scan() { line := scanner.Text() if strings.HasPrefix(line, "## ") && strings.Contains(line, ":") { line = strings.Replace(line, "## ", "", 1) parts := strings.SplitN(line, ":", 2) parts[1] = strings.Trim(parts[1], " ") directives[parts[0]] = parts[1] } } if err := scanner.Err(); err != nil { return nil } return directives } // populateExamplesCommandsHomeadditions grabs packr2 assets // When the items in the assets directory are changed, the packr2 command // must be run again in this directory (cmd/ddev/cmd) to update the saved // embedded files. // "make packr2" can be used to update the packr2 cache. func populateExamplesCommandsHomeadditions() error { app, err := ddevapp.GetActiveApp("") if err != nil { return nil } box := packr.New("customcommands", "./dotddev_assets") list := box.List() for _, file := range list { localPath := app.GetConfigPath(file) sigFound, err := fileutil.FgrepStringInFile(localPath, ddevapp.DdevFileSignature) if sigFound || err != nil { content, err := box.Find(file) if err != nil { return err } err = os.MkdirAll(filepath.Dir(localPath), 0755) if err != nil { return err } err = ioutil.WriteFile(localPath, content, 0755) if err != nil { return err } } } // This brings in both the commands and the homeadditions files box = packr.New("global_dotddev", "./global_dotddev_assets") list = box.List() globalDdevDir := globalconfig.GetGlobalDdevDir() for _, file := range list { localPath := filepath.Join(globalDdevDir, file) sigFound, err := fileutil.FgrepStringInFile(localPath, ddevapp.DdevFileSignature) if sigFound || err != nil { content, err := box.Find(file) if err != nil { return err } err = os.MkdirAll(filepath.Dir(localPath), 0755) if err != nil { return err } err = ioutil.WriteFile(localPath, content, 0755) if err != nil { return err } } } return nil }
1
14642
This change was not really intended; it was made by the VS Code linter. Looking at other packages, though, this looks like a best practice: standard-library packages at the top and GitHub imports afterwards (see the sketch after this record).
drud-ddev
go
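For reference, the grouping the comment above describes, as a compilable Go toy (the command and its body are throwaway; only the import layout matters):

package cmd

import (
	"fmt"
	"runtime"

	"github.com/spf13/cobra"
)

// exampleCmd exists only to use the imports above. The point is the layout:
// standard-library packages in the first group, external (GitHub-hosted)
// packages in the second, separated by a blank line - the ordering that
// goimports, and hence the VS Code tooling, produces.
var exampleCmd = &cobra.Command{
	Use: "example",
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println(runtime.GOOS)
	},
}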
@@ -51,7 +51,8 @@ type Options struct { DefaultSamplingPolicy trace.Sampler } -// New creates a new server. New(nil) is the same as new(Server). +// New creates a new server. New(nil) is the same as new(Server). Note: A +// configured Requestlogger will not log HealthChecks. func New(opts *Options) *Server { srv := new(Server) if opts != nil {
1
// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package server provides a preconfigured HTTP server with diagnostic hooks. package server import ( "net/http" "path" "sync" "github.com/google/go-cloud/health" "github.com/google/go-cloud/requestlog" "github.com/google/go-cloud/wire" "go.opencensus.io/trace" ) // Set is a Wire provider set that produces a *Server given the fields of // Options. This set might add new inputs over time, but they can always be the // zero value. var Set = wire.NewSet(New, Options{}) // Server is a preconfigured HTTP server with diagnostic hooks. // The zero value is a server with the default options. type Server struct { reqlog requestlog.Logger healthHandler health.Handler te trace.Exporter sampler trace.Sampler once sync.Once } // Options is the set of optional parameters. type Options struct { RequestLogger requestlog.Logger HealthChecks []health.Checker TraceExporter trace.Exporter DefaultSamplingPolicy trace.Sampler } // New creates a new server. New(nil) is the same as new(Server). func New(opts *Options) *Server { srv := new(Server) if opts != nil { srv.reqlog = opts.RequestLogger srv.te = opts.TraceExporter for _, c := range opts.HealthChecks { srv.healthHandler.Add(c) } srv.sampler = opts.DefaultSamplingPolicy } return srv } func (srv *Server) init() { srv.once.Do(func() { if srv.te != nil { trace.RegisterExporter(srv.te) } if srv.sampler != nil { trace.ApplyConfig(trace.Config{DefaultSampler: srv.sampler}) } }) } // ListenAndServe is a wrapper to use wherever http.ListenAndServe is used. // It wraps the passed-in http.Handler with a handler that handles tracing and // request logging. If the handler is nil, then http.DefaultServeMux will be used. func (srv *Server) ListenAndServe(addr string, h http.Handler) error { srv.init() // Setup health checks, /healthz route is taken by health checks by default. // Note: App Engine Flex uses /_ah/health by default, which can be changed // in app.yaml. We may want to do an auto-detection for flex in future. hr := "/healthz/" hcMux := http.NewServeMux() hcMux.HandleFunc(path.Join(hr, "liveness"), health.HandleLive) hcMux.Handle(path.Join(hr, "readiness"), &srv.healthHandler) mux := http.NewServeMux() mux.Handle(hr, hcMux) h = http.Handler(handler{h}) if srv.reqlog != nil { h = requestlog.NewHandler(srv.reqlog, h) } mux.Handle("/", h) return http.ListenAndServe(addr, mux) } // handler is a handler wrapper that handles tracing through OpenCensus for users. // TODO(shantuo): unify handler types from trace, requestlog, health checks, etc together. type handler struct { handler http.Handler } func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ctx, span := trace.StartSpan(r.Context(), r.URL.Host+r.URL.Path) defer span.End() r = r.WithContext(ctx) if h.handler == nil { h.handler = http.DefaultServeMux } h.handler.ServeHTTP(w, r) }
1
10759
This seems like a more appropriate message under `ListenAndServe`. WDYT? (A sketch of the relocation follows this record.)
google-go-cloud
go
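What the relocation suggested above would look like, as a trimmed stand-alone sketch; the Server below is a stub, and the note's wording is taken from the record's patch rather than from the upstream repository.

package server

import "net/http"

// Server is a stub standing in for the package's real Server type.
type Server struct{}

// ListenAndServe is a wrapper to use wherever http.ListenAndServe is used.
// It wraps the passed-in http.Handler with a handler that handles tracing and
// request logging. If the handler is nil, then http.DefaultServeMux will be
// used. Note: a configured RequestLogger will not log health checks.
func (srv *Server) ListenAndServe(addr string, h http.Handler) error {
	return http.ListenAndServe(addr, h)
}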
@@ -156,7 +156,6 @@ test.suite( await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), true) - await server.stop() }) })
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 'use strict' const assert = require('assert') const fs = require('fs') const path = require('path') const chrome = require('../../chrome') const error = require('../../lib/error') const fileServer = require('../../lib/test/fileserver') const io = require('../../io') const test = require('../../lib/test') const until = require('../../lib/until') test.suite( function (env) { let driver before(async function () { driver = await env .builder() .setChromeOptions(new chrome.Options().headless()) .build() }) after(() => driver.quit()) it('can send commands to devtools', async function () { await driver.get(test.Pages.ajaxyPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage) await driver.sendDevToolsCommand('Page.navigate', { url: test.Pages.echoPage, }) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage) }) it('can send commands to devtools and get return', async function () { await driver.get(test.Pages.ajaxyPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.ajaxyPage) await driver.get(test.Pages.echoPage) assert.strictEqual(await driver.getCurrentUrl(), test.Pages.echoPage) let history = await driver.sendAndGetDevToolsCommand( 'Page.getNavigationHistory' ) assert(history) assert(history.currentIndex >= 2) assert.strictEqual( history.entries[history.currentIndex].url, test.Pages.echoPage ) assert.strictEqual( history.entries[history.currentIndex - 1].url, test.Pages.ajaxyPage ) }) it('sends Page.enable command using devtools', async function () { const cdpConnection = await driver.createCDPConnection('page') cdpConnection.execute('Page.enable', 1, {}, function (_res, err) { assert(!err) }) }) it('sends Network and Page command using devtools', async function () { const cdpConnection = await driver.createCDPConnection('page') cdpConnection.execute('Network.enable', 1, {}, function (_res, err) { assert(!err) }) cdpConnection.execute( 'Page.navigate', 1, { url: 'chrome://newtab/' }, function (_res, err) { assert(!err) } ) }) describe('JS CDP events', function () { it('calls the event listener for console.log', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.onLogEvent(cdpConnection, function (event) { assert.strictEqual(event['args'][0]['value'], 'here') }) await driver.executeScript('console.log("here")') }) it('calls the event listener for js exceptions', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.onLogException(cdpConnection, function (event) { assert.strictEqual( event['exceptionDetails']['stackTrace']['callFrames'][0][ 'functionName' ], 'onmouseover' ) }) await driver.get(test.Pages.javascriptPage) let element = driver.findElement({ id: 'throwing-mouseover' 
}) await element.click() }) }) describe('JS DOM events', function () { it('calls the event listener on dom mutations', async function () { const cdpConnection = await driver.createCDPConnection('page') await driver.logMutationEvents(cdpConnection, function (event) { assert.strictEqual(event['attribute_name'], 'style') assert.strictEqual(event['current_value'], '') assert.strictEqual(event['old_value'], 'display:none;') }) await driver.get(fileServer.Pages.dynamicPage) let element = driver.findElement({ id: 'reveal' }) await element.click() let revealed = driver.findElement({ id: 'revealed' }) await driver.wait(until.elementIsVisible(revealed), 5000) }) }) describe('Basic Auth Injection', function () { it('denies entry if username and password do not match', async function () { const pageCdpConnection = await driver.createCDPConnection('page') await driver.register('random', 'random', pageCdpConnection) await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), false) }) it('grants access if username and password are a match', async function () { const pageCdpConnection = await driver.createCDPConnection('page') await driver.register('genie', 'bottle', pageCdpConnection) await driver.get(fileServer.Pages.basicAuth) let source = await driver.getPageSource() assert.strictEqual(source.includes('Access granted!'), true) await server.stop() }) }) describe('setDownloadPath', function () { it('can enable downloads in headless mode', async function () { const dir = await io.tmpDir() await driver.setDownloadPath(dir) const url = fileServer.whereIs('/data/firefox/webextension.xpi') await driver.get(`data:text/html,<!DOCTYPE html> <div><a download="" href="${url}">Go!</a></div>`) await driver.findElement({ css: 'a' }).click() const downloadPath = path.join(dir, 'webextension.xpi') await driver.wait(() => io.exists(downloadPath), 5000) const goldenPath = path.join( __dirname, '../../lib/test/data/firefox/webextension.xpi' ) assert.strictEqual( fs.readFileSync(downloadPath, 'binary'), fs.readFileSync(goldenPath, 'binary') ) }) it('throws if path is not a directory', async function () { await assertInvalidArgumentError(() => driver.setDownloadPath()) await assertInvalidArgumentError(() => driver.setDownloadPath(null)) await assertInvalidArgumentError(() => driver.setDownloadPath('')) await assertInvalidArgumentError(() => driver.setDownloadPath(1234)) const file = await io.tmpFile() await assertInvalidArgumentError(() => driver.setDownloadPath(file)) async function assertInvalidArgumentError(fn) { try { await fn() return Promise.reject(Error('should have failed')) } catch (err) { if (err instanceof error.InvalidArgumentError) { return } throw err } } }) }) }, { browsers: ['chrome'] } )
1
18850
Is this not required?
SeleniumHQ-selenium
js
@@ -360,7 +360,16 @@ class AdminController extends Controller } $searchableFields = $this->entity['search']['fields']; - $paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'), $this->entity['search']['dql_filter']); + $paginator = $this->findBy( + $this->entity['class'], + $this->request->query->get('query'), + $searchableFields, + $this->request->query->get('page', 1), + $this->config['list']['max_results'], + isset($this->entity['search']['sort']['field']) ? $this->entity['search']['sort']['field'] : $this->request->query->get('sortField'), + isset($this->entity['search']['sort']['direction']) ? $this->entity['search']['sort']['direction'] : $this->request->query->get('sortDirection'), + $this->entity['search']['dql_filter'] + ); $fields = $this->entity['list']['fields']; $this->dispatch(EasyAdminEvents::POST_SEARCH, array(
1
<?php /* * This file is part of the EasyAdminBundle. * * (c) Javier Eguiluz <[email protected]> * * For the full copyright and license information, please view the LICENSE * file that was distributed with this source code. */ namespace EasyCorp\Bundle\EasyAdminBundle\Controller; use Doctrine\DBAL\Exception\ForeignKeyConstraintViolationException; use Doctrine\ORM\EntityManager; use Doctrine\ORM\QueryBuilder; use EasyCorp\Bundle\EasyAdminBundle\Event\EasyAdminEvents; use EasyCorp\Bundle\EasyAdminBundle\Exception\EntityRemoveException; use EasyCorp\Bundle\EasyAdminBundle\Exception\ForbiddenActionException; use EasyCorp\Bundle\EasyAdminBundle\Exception\NoEntitiesConfiguredException; use EasyCorp\Bundle\EasyAdminBundle\Exception\UndefinedEntityException; use EasyCorp\Bundle\EasyAdminBundle\Form\Util\LegacyFormHelper; use Pagerfanta\Pagerfanta; use Sensio\Bundle\FrameworkExtraBundle\Configuration\Route; use Symfony\Bundle\FrameworkBundle\Controller\Controller; use Symfony\Component\EventDispatcher\GenericEvent; use Symfony\Component\Form\Form; use Symfony\Component\Form\FormBuilder; use Symfony\Component\Form\FormBuilderInterface; use Symfony\Component\Form\FormInterface; use Symfony\Component\HttpFoundation\JsonResponse; use Symfony\Component\HttpFoundation\RedirectResponse; use Symfony\Component\HttpFoundation\Request; use Symfony\Component\HttpFoundation\Response; /** * The controller used to render all the default EasyAdmin actions. * * @author Javier Eguiluz <[email protected]> */ class AdminController extends Controller { /** @var array The full configuration of the entire backend */ protected $config; /** @var array The full configuration of the current entity */ protected $entity = array(); /** @var Request The instance of the current Symfony request */ protected $request; /** @var EntityManager The Doctrine entity manager for the current entity */ protected $em; /** * @Route("/", name="easyadmin") * @Route("/", name="admin") * * The 'admin' route is deprecated since version 1.8.0 and it will be removed in 2.0. * * @param Request $request * * @return RedirectResponse|Response */ public function indexAction(Request $request) { $this->initialize($request); if (null === $request->query->get('entity')) { return $this->redirectToBackendHomepage(); } $action = $request->query->get('action', 'list'); if (!$this->isActionAllowed($action)) { throw new ForbiddenActionException(array('action' => $action, 'entity_name' => $this->entity['name'])); } return $this->executeDynamicMethod($action.'<EntityName>Action'); } /** * Utility method which initializes the configuration of the entity on which * the user is performing the action. * * @param Request $request */ protected function initialize(Request $request) { $this->dispatch(EasyAdminEvents::PRE_INITIALIZE); $this->config = $this->get('easyadmin.config.manager')->getBackendConfig(); if (0 === count($this->config['entities'])) { throw new NoEntitiesConfiguredException(); } // this condition happens when accessing the backend homepage and before // redirecting to the default page set as the homepage if (null === $entityName = $request->query->get('entity')) { return; } if (!array_key_exists($entityName, $this->config['entities'])) { throw new UndefinedEntityException(array('entity_name' => $entityName)); } $this->entity = $this->get('easyadmin.config.manager')->getEntityConfiguration($entityName); $action = $request->query->get('action', 'list'); if (!$request->query->has('sortField')) { $sortField = isset($this->entity[$action]['sort']['field']) ? 
$this->entity[$action]['sort']['field'] : $this->entity['primary_key_field_name']; $request->query->set('sortField', $sortField); } if (!$request->query->has('sortDirection')) { $sortDirection = isset($this->entity[$action]['sort']['direction']) ? $this->entity[$action]['sort']['direction'] : 'DESC'; $request->query->set('sortDirection', $sortDirection); } $this->em = $this->getDoctrine()->getManagerForClass($this->entity['class']); $this->request = $request; $this->dispatch(EasyAdminEvents::POST_INITIALIZE); } protected function dispatch($eventName, array $arguments = array()) { $arguments = array_replace(array( 'config' => $this->config, 'em' => $this->em, 'entity' => $this->entity, 'request' => $this->request, ), $arguments); $subject = isset($arguments['paginator']) ? $arguments['paginator'] : $arguments['entity']; $event = new GenericEvent($subject, $arguments); $this->get('event_dispatcher')->dispatch($eventName, $event); } /** * The method that returns the values displayed by an autocomplete field * based on the user's input. * * @return JsonResponse */ protected function autocompleteAction() { $results = $this->get('easyadmin.autocomplete')->find( $this->request->query->get('entity'), $this->request->query->get('query'), $this->request->query->get('page', 1) ); return new JsonResponse($results); } /** * The method that is executed when the user performs a 'list' action on an entity. * * @return Response */ protected function listAction() { $this->dispatch(EasyAdminEvents::PRE_LIST); $fields = $this->entity['list']['fields']; $paginator = $this->findAll($this->entity['class'], $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'), $this->entity['list']['dql_filter']); $this->dispatch(EasyAdminEvents::POST_LIST, array('paginator' => $paginator)); return $this->render($this->entity['templates']['list'], array( 'paginator' => $paginator, 'fields' => $fields, 'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(), )); } /** * The method that is executed when the user performs a 'edit' action on an entity. 
* * @return Response|RedirectResponse */ protected function editAction() { $this->dispatch(EasyAdminEvents::PRE_EDIT); $id = $this->request->query->get('id'); $easyadmin = $this->request->attributes->get('easyadmin'); $entity = $easyadmin['item']; if ($this->request->isXmlHttpRequest() && $property = $this->request->query->get('property')) { $newValue = 'true' === mb_strtolower($this->request->query->get('newValue')); $fieldsMetadata = $this->entity['list']['fields']; if (!isset($fieldsMetadata[$property]) || 'toggle' !== $fieldsMetadata[$property]['dataType']) { throw new \RuntimeException(sprintf('The type of the "%s" property is not "toggle".', $property)); } $this->updateEntityProperty($entity, $property, $newValue); // cast to integer instead of string to avoid sending empty responses for 'false' return new Response((int) $newValue); } $fields = $this->entity['edit']['fields']; $editForm = $this->executeDynamicMethod('create<EntityName>EditForm', array($entity, $fields)); $deleteForm = $this->createDeleteForm($this->entity['name'], $id); $editForm->handleRequest($this->request); if ($editForm->isSubmitted() && $editForm->isValid()) { $this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity)); $this->executeDynamicMethod('preUpdate<EntityName>Entity', array($entity)); $this->em->flush(); $this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity)); return $this->redirectToReferrer(); } $this->dispatch(EasyAdminEvents::POST_EDIT); return $this->render($this->entity['templates']['edit'], array( 'form' => $editForm->createView(), 'entity_fields' => $fields, 'entity' => $entity, 'delete_form' => $deleteForm->createView(), )); } /** * The method that is executed when the user performs a 'show' action on an entity. * * @return Response */ protected function showAction() { $this->dispatch(EasyAdminEvents::PRE_SHOW); $id = $this->request->query->get('id'); $easyadmin = $this->request->attributes->get('easyadmin'); $entity = $easyadmin['item']; $fields = $this->entity['show']['fields']; $deleteForm = $this->createDeleteForm($this->entity['name'], $id); $this->dispatch(EasyAdminEvents::POST_SHOW, array( 'deleteForm' => $deleteForm, 'fields' => $fields, 'entity' => $entity, )); return $this->render($this->entity['templates']['show'], array( 'entity' => $entity, 'fields' => $fields, 'delete_form' => $deleteForm->createView(), )); } /** * The method that is executed when the user performs a 'new' action on an entity. 
* * @return Response|RedirectResponse */ protected function newAction() { $this->dispatch(EasyAdminEvents::PRE_NEW); $entity = $this->executeDynamicMethod('createNew<EntityName>Entity'); $easyadmin = $this->request->attributes->get('easyadmin'); $easyadmin['item'] = $entity; $this->request->attributes->set('easyadmin', $easyadmin); $fields = $this->entity['new']['fields']; $newForm = $this->executeDynamicMethod('create<EntityName>NewForm', array($entity, $fields)); $newForm->handleRequest($this->request); if ($newForm->isSubmitted() && $newForm->isValid()) { $this->dispatch(EasyAdminEvents::PRE_PERSIST, array('entity' => $entity)); $this->executeDynamicMethod('prePersist<EntityName>Entity', array($entity)); $this->em->persist($entity); $this->em->flush(); $this->dispatch(EasyAdminEvents::POST_PERSIST, array('entity' => $entity)); return $this->redirectToReferrer(); } $this->dispatch(EasyAdminEvents::POST_NEW, array( 'entity_fields' => $fields, 'form' => $newForm, 'entity' => $entity, )); return $this->render($this->entity['templates']['new'], array( 'form' => $newForm->createView(), 'entity_fields' => $fields, 'entity' => $entity, )); } /** * The method that is executed when the user performs a 'delete' action to * remove any entity. * * @return RedirectResponse */ protected function deleteAction() { $this->dispatch(EasyAdminEvents::PRE_DELETE); if ('DELETE' !== $this->request->getMethod()) { return $this->redirect($this->generateUrl('easyadmin', array('action' => 'list', 'entity' => $this->entity['name']))); } $id = $this->request->query->get('id'); $form = $this->createDeleteForm($this->entity['name'], $id); $form->handleRequest($this->request); if ($form->isSubmitted() && $form->isValid()) { $easyadmin = $this->request->attributes->get('easyadmin'); $entity = $easyadmin['item']; $this->dispatch(EasyAdminEvents::PRE_REMOVE, array('entity' => $entity)); $this->executeDynamicMethod('preRemove<EntityName>Entity', array($entity)); try { $this->em->remove($entity); $this->em->flush(); } catch (ForeignKeyConstraintViolationException $e) { throw new EntityRemoveException(array('entity_name' => $this->entity['name'], 'message' => $e->getMessage())); } $this->dispatch(EasyAdminEvents::POST_REMOVE, array('entity' => $entity)); } $this->dispatch(EasyAdminEvents::POST_DELETE); return $this->redirectToReferrer(); } /** * The method that is executed when the user performs a query on an entity. 
* * @return Response */ protected function searchAction() { $this->dispatch(EasyAdminEvents::PRE_SEARCH); // if the search query is empty, redirect to the 'list' action if ('' === $this->request->query->get('query')) { $queryParameters = array_replace($this->request->query->all(), array('action' => 'list', 'query' => null)); $queryParameters = array_filter($queryParameters); return $this->redirect($this->get('router')->generate('easyadmin', $queryParameters)); } $searchableFields = $this->entity['search']['fields']; $paginator = $this->findBy($this->entity['class'], $this->request->query->get('query'), $searchableFields, $this->request->query->get('page', 1), $this->config['list']['max_results'], $this->request->query->get('sortField'), $this->request->query->get('sortDirection'), $this->entity['search']['dql_filter']); $fields = $this->entity['list']['fields']; $this->dispatch(EasyAdminEvents::POST_SEARCH, array( 'fields' => $fields, 'paginator' => $paginator, )); return $this->render($this->entity['templates']['list'], array( 'paginator' => $paginator, 'fields' => $fields, 'delete_form_template' => $this->createDeleteForm($this->entity['name'], '__id__')->createView(), )); } /** * It updates the value of some property of some entity to the new given value. * * @param mixed $entity The instance of the entity to modify * @param string $property The name of the property to change * @param bool $value The new value of the property * * @throws \RuntimeException */ protected function updateEntityProperty($entity, $property, $value) { $entityConfig = $this->entity; // the method_exists() check is needed because Symfony 2.3 doesn't have isWritable() method if (method_exists($this->get('property_accessor'), 'isWritable') && !$this->get('property_accessor')->isWritable($entity, $property)) { throw new \RuntimeException(sprintf('The "%s" property of the "%s" entity is not writable.', $property, $entityConfig['name'])); } $this->dispatch(EasyAdminEvents::PRE_UPDATE, array('entity' => $entity, 'newValue' => $value)); $this->get('property_accessor')->setValue($entity, $property, $value); $this->em->persist($entity); $this->em->flush(); $this->dispatch(EasyAdminEvents::POST_UPDATE, array('entity' => $entity, 'newValue' => $value)); $this->dispatch(EasyAdminEvents::POST_EDIT); } /** * Creates a new object of the current managed entity. * This method is mostly here for override convenience, because it allows * the user to use his own method to customize the entity instantiation. * * @return object */ protected function createNewEntity() { $entityFullyQualifiedClassName = $this->entity['class']; return new $entityFullyQualifiedClassName(); } /** * Allows applications to modify the entity associated with the item being * created before persisting it. * * @param object $entity */ protected function prePersistEntity($entity) { } /** * Allows applications to modify the entity associated with the item being * edited before persisting it. * * @param object $entity */ protected function preUpdateEntity($entity) { } /** * Allows applications to modify the entity associated with the item being * deleted before removing it. * * @param object $entity */ protected function preRemoveEntity($entity) { } /** * Performs a database query to get all the records related to the given * entity. It supports pagination and field sorting. 
* * @param string $entityClass * @param int $page * @param int $maxPerPage * @param string|null $sortField * @param string|null $sortDirection * @param string|null $dqlFilter * * @return Pagerfanta The paginated query results */ protected function findAll($entityClass, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null, $dqlFilter = null) { if (empty($sortDirection) || !in_array(strtoupper($sortDirection), array('ASC', 'DESC'))) { $sortDirection = 'DESC'; } $queryBuilder = $this->executeDynamicMethod('create<EntityName>ListQueryBuilder', array($entityClass, $sortDirection, $sortField, $dqlFilter)); $this->dispatch(EasyAdminEvents::POST_LIST_QUERY_BUILDER, array( 'query_builder' => $queryBuilder, 'sort_field' => $sortField, 'sort_direction' => $sortDirection, )); return $this->get('easyadmin.paginator')->createOrmPaginator($queryBuilder, $page, $maxPerPage); } /** * Creates Query Builder instance for all the records. * * @param string $entityClass * @param string $sortDirection * @param string|null $sortField * @param string|null $dqlFilter * * @return QueryBuilder The Query Builder instance */ protected function createListQueryBuilder($entityClass, $sortDirection, $sortField = null, $dqlFilter = null) { return $this->get('easyadmin.query_builder')->createListQueryBuilder($this->entity, $sortField, $sortDirection, $dqlFilter); } /** * Performs a database query based on the search query provided by the user. * It supports pagination and field sorting. * * @param string $entityClass * @param string $searchQuery * @param array $searchableFields * @param int $page * @param int $maxPerPage * @param string|null $sortField * @param string|null $sortDirection * @param string|null $dqlFilter * * @return Pagerfanta The paginated query results */ protected function findBy($entityClass, $searchQuery, array $searchableFields, $page = 1, $maxPerPage = 15, $sortField = null, $sortDirection = null, $dqlFilter = null) { $queryBuilder = $this->executeDynamicMethod('create<EntityName>SearchQueryBuilder', array($entityClass, $searchQuery, $searchableFields, $sortField, $sortDirection, $dqlFilter)); $this->dispatch(EasyAdminEvents::POST_SEARCH_QUERY_BUILDER, array( 'query_builder' => $queryBuilder, 'search_query' => $searchQuery, 'searchable_fields' => $searchableFields, )); return $this->get('easyadmin.paginator')->createOrmPaginator($queryBuilder, $page, $maxPerPage); } /** * Creates Query Builder instance for search query. * * @param string $entityClass * @param string $searchQuery * @param array $searchableFields * @param string|null $sortField * @param string|null $sortDirection * @param string|null $dqlFilter * * @return QueryBuilder The Query Builder instance */ protected function createSearchQueryBuilder($entityClass, $searchQuery, array $searchableFields, $sortField = null, $sortDirection = null, $dqlFilter = null) { return $this->get('easyadmin.query_builder')->createSearchQueryBuilder($this->entity, $searchQuery, $sortField, $sortDirection, $dqlFilter); } /** * Creates the form used to edit an entity. * * @param object $entity * @param array $entityProperties * * @return Form */ protected function createEditForm($entity, array $entityProperties) { return $this->createEntityForm($entity, $entityProperties, 'edit'); } /** * Creates the form used to create an entity. 
* * @param object $entity * @param array $entityProperties * * @return Form */ protected function createNewForm($entity, array $entityProperties) { return $this->createEntityForm($entity, $entityProperties, 'new'); } /** * Creates the form builder of the form used to create or edit the given entity. * * @param object $entity * @param string $view The name of the view where this form is used ('new' or 'edit') * * @return FormBuilder */ protected function createEntityFormBuilder($entity, $view) { $formOptions = $this->executeDynamicMethod('get<EntityName>EntityFormOptions', array($entity, $view)); return $this->get('form.factory')->createNamedBuilder(mb_strtolower($this->entity['name']), LegacyFormHelper::getType('easyadmin'), $entity, $formOptions); } /** * Retrieves the list of form options before sending them to the form builder. * This allows adding dynamic logic to the default form options. * * @param object $entity * @param string $view * * @return array */ protected function getEntityFormOptions($entity, $view) { $formOptions = $this->entity[$view]['form_options']; $formOptions['entity'] = $this->entity['name']; $formOptions['view'] = $view; return $formOptions; } /** * Creates the form object used to create or edit the given entity. * * @param object $entity * @param array $entityProperties * @param string $view * * @return FormInterface * * @throws \Exception */ protected function createEntityForm($entity, array $entityProperties, $view) { if (method_exists($this, $customMethodName = 'create'.$this->entity['name'].'EntityForm')) { $form = $this->{$customMethodName}($entity, $entityProperties, $view); if (!$form instanceof FormInterface) { throw new \UnexpectedValueException(sprintf( 'The "%s" method must return a FormInterface, "%s" given.', $customMethodName, is_object($form) ? get_class($form) : gettype($form) )); } return $form; } $formBuilder = $this->executeDynamicMethod('create<EntityName>EntityFormBuilder', array($entity, $view)); if (!$formBuilder instanceof FormBuilderInterface) { throw new \UnexpectedValueException(sprintf( 'The "%s" method must return a FormBuilderInterface, "%s" given.', 'createEntityForm', is_object($formBuilder) ? get_class($formBuilder) : gettype($formBuilder) )); } return $formBuilder->getForm(); } /** * Creates the form used to delete an entity. It must be a form because * the deletion of the entity is always performed with the 'DELETE' HTTP method, * which requires a form to work in current browsers. * * @param string $entityName * @param int|string $entityId When reusing the delete form for multiple entities, a pattern string is passed instead of an integer * * @return Form|FormInterface */ protected function createDeleteForm($entityName, $entityId) { /** @var FormBuilder $formBuilder */ $formBuilder = $this->get('form.factory')->createNamedBuilder('delete_form') ->setAction($this->generateUrl('easyadmin', array('action' => 'delete', 'entity' => $entityName, 'id' => $entityId))) ->setMethod('DELETE') ; $formBuilder->add('submit', LegacyFormHelper::getType('submit'), array('label' => 'delete_modal.action', 'translation_domain' => 'EasyAdminBundle')); // needed to avoid submitting empty delete forms (see issue #1409) $formBuilder->add('_easyadmin_delete_flag', LegacyFormHelper::getType('hidden'), array('data' => '1')); return $formBuilder->getForm(); } /** * Utility method that checks if the given action is allowed for * the current entity.
* * @param string $actionName * * @return bool */ protected function isActionAllowed($actionName) { return false === in_array($actionName, $this->entity['disabled_actions'], true); } /** * Utility shortcut to render an error when the requested action is not allowed * for the given entity. * * @param string $action * * @deprecated Use the ForbiddenException instead of this method * * @return Response */ protected function renderForbiddenActionError($action) { return $this->render('@EasyAdmin/error/forbidden_action.html.twig', array('action' => $action), new Response('', 403)); } /** * Given a method name pattern, it looks for the customized version of that * method (based on the entity name) and executes it. If the custom method * does not exist, it executes the regular method. * * For example: * executeDynamicMethod('create<EntityName>Entity') and the entity name is 'User' * if 'createUserEntity()' exists, execute it; otherwise execute 'createEntity()' * * @param string $methodNamePattern The pattern of the method name (dynamic parts are enclosed with <> angle brackets) * @param array $arguments The arguments passed to the executed method * * @return mixed */ protected function executeDynamicMethod($methodNamePattern, array $arguments = array()) { $methodName = str_replace('<EntityName>', $this->entity['name'], $methodNamePattern); if (!is_callable(array($this, $methodName))) { $methodName = str_replace('<EntityName>', '', $methodNamePattern); } return call_user_func_array(array($this, $methodName), $arguments); } /** * Generates the backend homepage and redirects to it. */ protected function redirectToBackendHomepage() { $homepageConfig = $this->config['homepage']; $url = isset($homepageConfig['url']) ? $homepageConfig['url'] : $this->get('router')->generate($homepageConfig['route'], $homepageConfig['params']); return $this->redirect($url); } /** * It renders the main CSS applied to the backend design. This controller * allows generating dynamic CSS files that use variables without the need * to set up a CSS preprocessing toolchain. * * @deprecated The CSS styles are no longer rendered at runtime but preprocessed during container compilation. Use the $container['easyadmin.config']['_internal']['custom_css'] variable instead */ public function renderCssAction() { } /** * @return RedirectResponse */ protected function redirectToReferrer() { $referrerUrl = $this->request->query->get('referer', ''); if (!empty($referrerUrl)) { return $this->redirect(urldecode($referrerUrl)); } if ($this->isActionAllowed('list')) { return $this->redirect($this->generateUrl('easyadmin', array( 'action' => 'list', 'entity' => $this->entity['name'], ))); } return $this->redirectToBackendHomepage(); } } class_alias('EasyCorp\Bundle\EasyAdminBundle\Controller\AdminController', 'JavierEguiluz\Bundle\EasyAdminBundle\Controller\AdminController', false);
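The executeDynamicMethod() convention documented above means a backend can customize per-entity behavior simply by declaring suitably named methods in an AdminController subclass. A minimal sketch, assuming an entity whose configured name is User; the User class and the setters used below are hypothetical:

<?php
use EasyCorp\Bundle\EasyAdminBundle\Controller\AdminController;

class CustomAdminController extends AdminController
{
    // Resolved from the 'createNew<EntityName>Entity' pattern used by newAction();
    // entities without a dedicated method fall back to createNewEntity().
    public function createNewUserEntity()
    {
        $user = new User(); // hypothetical entity class
        $user->setCreatedAt(new \DateTime()); // hypothetical property

        return $user;
    }

    // Resolved from the 'prePersist<EntityName>Entity' pattern and invoked
    // right before the new entity is persisted and flushed.
    public function prePersistUserEntity($user)
    {
        $user->setSource('backend'); // hypothetical property
    }
}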
1
11,163
We should start thinking of an object that encapsulates this information :) we might need more arguments in the future.
EasyCorp-EasyAdminBundle
php
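The review message above asks for a parameter object, so that future arguments can be added without touching every call site of the form helpers. One hypothetical shape for it (the class and accessor names are invented for illustration and are not part of the bundle):

<?php
// Bundles the values the form-related helpers currently pass around one by one.
class EntityFormContext
{
    private $entity;
    private $fields;
    private $view; // 'new' or 'edit'

    public function __construct($entity, array $fields, $view)
    {
        $this->entity = $entity;
        $this->fields = $fields;
        $this->view = $view;
    }

    public function getEntity() { return $this->entity; }
    public function getFields() { return $this->fields; }
    public function getView() { return $this->view; }
}

// A helper can then keep a one-argument signature as requirements grow:
// $form = $this->createEntityForm($context);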
@@ -4652,10 +4652,12 @@ TEST_F(VkLayerTest, RenderPassBarrierConflicts) {
     img_barrier.subresourceRange.levelCount = 1;
     // Mis-match src stage mask
     m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173");
+    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174");
     vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                          VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
     m_errorMonitor->VerifyFound();
     // Now mis-match dst stage mask
+    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173");
     m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174");
     vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_HOST_BIT,
                          VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
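The double set-up is needed because ErrorMonitor::VerifyFound() (defined further down in this file) consumes the armed expectations and then resets the monitor, and the patch indicates that each mismatched barrier now reports both the src and the dst stage-mask VUIDs. A reduced sketch of the resulting rhythm, with the MakeMismatched...() calls as hypothetical stand-ins for the vkCmdPipelineBarrier invocations:

// Every trigger/verify cycle starts from a clean slate, so both VUIDs must be
// armed again before the second call.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174");
MakeMismatchedSrcBarrierCall();  // hypothetical stand-in
m_errorMonitor->VerifyFound();   // checks both messages, then resets the monitor

m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174");
MakeMismatchedDstBarrierCall();  // hypothetical stand-in
m_errorMonitor->VerifyFound();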
1
/* * Copyright (c) 2015-2017 The Khronos Group Inc. * Copyright (c) 2015-2017 Valve Corporation * Copyright (c) 2015-2017 LunarG, Inc. * Copyright (c) 2015-2017 Google, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Courtney Goeltzenleuchter <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Mike Stroyan <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Tony Barbour <[email protected]> * Author: Cody Northrop <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Jeremy Kniager <[email protected]> */ #ifdef ANDROID #include "vulkan_wrapper.h" #else #define NOMINMAX #include <vulkan/vulkan.h> #endif #include "layers/vk_device_profile_api_layer.h" #if defined(ANDROID) && defined(VALIDATION_APK) #include <android/log.h> #include <android_native_app_glue.h> #endif #include "icd-spv.h" #include "test_common.h" #include "vk_layer_config.h" #include "vk_format_utils.h" #include "vk_validation_error_messages.h" #include "vkrenderframework.h" #include "vk_typemap_helper.h" #include <algorithm> #include <cmath> #include <functional> #include <limits> #include <memory> #include <unordered_set> //-------------------------------------------------------------------------------------- // Mesh and VertexFormat Data //-------------------------------------------------------------------------------------- const char *kSkipPrefix = " TEST SKIPPED:"; enum BsoFailSelect { BsoFailNone, BsoFailLineWidth, BsoFailDepthBias, BsoFailViewport, BsoFailScissor, BsoFailBlend, BsoFailDepthBounds, BsoFailStencilReadMask, BsoFailStencilWriteMask, BsoFailStencilReference, BsoFailCmdClearAttachments, BsoFailIndexBuffer, BsoFailIndexBufferBadSize, BsoFailIndexBufferBadOffset, BsoFailIndexBufferBadMapSize, BsoFailIndexBufferBadMapOffset }; static const char bindStateVertShaderText[] = "#version 450\n" "vec2 vertices[3];\n" "void main() {\n" " vertices[0] = vec2(-1.0, -1.0);\n" " vertices[1] = vec2( 1.0, -1.0);\n" " vertices[2] = vec2( 0.0, 1.0);\n" " gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n" "}\n"; static const char bindStateFragShaderText[] = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; // Static arrays helper template <class ElementT, size_t array_size> size_t size(ElementT (&)[array_size]) { return array_size; } // Format search helper VkFormat FindSupportedDepthStencilFormat(VkPhysicalDevice phy) { VkFormat ds_formats[] = {VK_FORMAT_D16_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT}; for (uint32_t i = 0; i < sizeof(ds_formats); i++) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, ds_formats[i], &format_props); if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) { return ds_formats[i]; } } return VK_FORMAT_UNDEFINED; } // Returns true if *any* requested features are available. // Assumption is that the framework can successfully create an image as // long as at least one of the feature bits is present (excepting VTX_BUF). 
bool ImageFormatIsSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL, VkFormatFeatureFlags features = ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, format, &format_props); VkFormatFeatureFlags phy_features = (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures); return (0 != (phy_features & features)); } // Returns true if format and *all* requested features are available. bool ImageFormatAndFeaturesSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) { VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(phy, format, &format_props); VkFormatFeatureFlags phy_features = (VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures); return (features == (phy_features & features)); } // Returns true if format and *all* requested features are available. bool ImageFormatAndFeaturesSupported(const VkInstance inst, const VkPhysicalDevice phy, const VkImageCreateInfo info, const VkFormatFeatureFlags features) { // Verify physical device support of format features if (!ImageFormatAndFeaturesSupported(phy, info.format, info.tiling, features)) { return false; } // Verify that PhysDevImageFormatProp() also claims support for the specific usage VkImageFormatProperties props; VkResult err = vkGetPhysicalDeviceImageFormatProperties(phy, info.format, info.imageType, info.tiling, info.usage, info.flags, &props); if (VK_SUCCESS != err) { return false; } #if 0 // Convinced this chunk doesn't currently add any additional info, but leaving in place because it may be // necessary with future extensions // Verify again using version 2, if supported, which *can* return more property data than the original... 
// (It's not clear that this is any more definitive than using the original version - but no harm) PFN_vkGetPhysicalDeviceImageFormatProperties2KHR p_GetPDIFP2KHR = (PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)vkGetInstanceProcAddr(inst, "vkGetPhysicalDeviceImageFormatProperties2KHR"); if (NULL != p_GetPDIFP2KHR) { VkPhysicalDeviceImageFormatInfo2KHR fmt_info{}; fmt_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR; fmt_info.pNext = nullptr; fmt_info.format = info.format; fmt_info.type = info.imageType; fmt_info.tiling = info.tiling; fmt_info.usage = info.usage; fmt_info.flags = info.flags; VkImageFormatProperties2KHR fmt_props = {}; fmt_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR; err = p_GetPDIFP2KHR(phy, &fmt_info, &fmt_props); if (VK_SUCCESS != err) { return false; } } #endif return true; } // Validation report callback prototype static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg, void *pUserData); // Simple sane SamplerCreateInfo boilerplate static VkSamplerCreateInfo SafeSaneSamplerCreateInfo() { VkSamplerCreateInfo sampler_create_info = {}; sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO; sampler_create_info.pNext = nullptr; sampler_create_info.magFilter = VK_FILTER_NEAREST; sampler_create_info.minFilter = VK_FILTER_NEAREST; sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST; sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE; sampler_create_info.mipLodBias = 0.0; sampler_create_info.anisotropyEnable = VK_FALSE; sampler_create_info.maxAnisotropy = 1.0; sampler_create_info.compareEnable = VK_FALSE; sampler_create_info.compareOp = VK_COMPARE_OP_NEVER; sampler_create_info.minLod = 0.0; sampler_create_info.maxLod = 16.0; sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE; sampler_create_info.unnormalizedCoordinates = VK_FALSE; return sampler_create_info; } // Dependent "false" type for the static assert, as GCC will evaluate // non-dependent static_asserts even for non-instantiated templates template <typename T> struct AlwaysFalse : std::false_type {}; // Helpers to get nearest greater or smaller value (of float) -- useful for testing the boundary cases of Vulkan limits template <typename T> T NearestGreater(const T from) { using Lim = std::numeric_limits<T>; const auto positive_direction = Lim::has_infinity ? Lim::infinity() : Lim::max(); return std::nextafter(from, positive_direction); } template <typename T> T NearestSmaller(const T from) { using Lim = std::numeric_limits<T>; const auto negative_direction = Lim::has_infinity ? -Lim::infinity() : Lim::lowest(); return std::nextafter(from, negative_direction); } // ErrorMonitor Usage: // // Call SetDesiredFailureMsg with a string to be compared against all // encountered log messages, or a validation error enum identifying // desired error message. Passing NULL or VALIDATION_ERROR_MAX_ENUM // will match all log messages. logMsg will return true for skipCall // only if msg is matched or NULL. // // Call VerifyFound to determine if all desired failure messages // were encountered. Call VerifyNotFound to determine if any unexpected // failure was encountered. 
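#if 0
// Illustrative only (guarded out like the chunk above): the canonical
// ErrorMonitor cycle described in the comment block above this class.
// SomeInvalidVulkanCall()/SomeValidVulkanCall() are hypothetical stand-ins
// for whatever API call a given test exercises.
TEST_F(VkLayerTest, ErrorMonitorUsageSketch) {
    ASSERT_NO_FATAL_FAILURE(Init());
    // 1) Arm the monitor with the message (or VUID string) the call should log.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkSomeCall-someParam-00000");  // placeholder VUID
    // 2) Make the invalid call.
    SomeInvalidVulkanCall();
    // 3) VerifyFound() asserts that every armed message arrived, then resets
    //    the monitor, so expectations never leak into the next cycle.
    m_errorMonitor->VerifyFound();

    // Positive path: ExpectSuccess() matches any error, and VerifyNotFound()
    // fails the test if anything at all was logged.
    m_errorMonitor->ExpectSuccess();
    SomeValidVulkanCall();
    m_errorMonitor->VerifyNotFound();
}
#endif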
class ErrorMonitor { public: ErrorMonitor() { test_platform_thread_create_mutex(&mutex_); test_platform_thread_lock_mutex(&mutex_); Reset(); test_platform_thread_unlock_mutex(&mutex_); } ~ErrorMonitor() { test_platform_thread_delete_mutex(&mutex_); } // Set monitor to pristine state void Reset() { message_flags_ = VK_DEBUG_REPORT_ERROR_BIT_EXT; bailout_ = NULL; message_found_ = VK_FALSE; failure_message_strings_.clear(); desired_message_strings_.clear(); ignore_message_strings_.clear(); other_messages_.clear(); message_outstanding_count_ = 0; } // ErrorMonitor will look for an error message containing the specified string(s) void SetDesiredFailureMsg(const VkFlags msgFlags, const std::string msg) { SetDesiredFailureMsg(msgFlags, msg.c_str()); } void SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) { test_platform_thread_lock_mutex(&mutex_); desired_message_strings_.insert(msgString); message_flags_ |= msgFlags; message_outstanding_count_++; test_platform_thread_unlock_mutex(&mutex_); } // ErrorMonitor will look for an error message containing the specified string(s) template <typename Iter> void SetDesiredFailureMsg(const VkFlags msgFlags, Iter iter, const Iter end) { for (; iter != end; ++iter) { SetDesiredFailureMsg(msgFlags, *iter); } } // Set an error that the error monitor will ignore. Do not use this function if you are creating a new test. // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this // function and its definition. void SetUnexpectedError(const char *const msg) { test_platform_thread_lock_mutex(&mutex_); ignore_message_strings_.emplace_back(msg); test_platform_thread_unlock_mutex(&mutex_); } VkBool32 CheckForDesiredMsg(const uint32_t message_code, const char *const msgString) { VkBool32 result = VK_FALSE; test_platform_thread_lock_mutex(&mutex_); if (bailout_ != nullptr) { *bailout_ = true; } string errorString(msgString); bool found_expected = false; if (!IgnoreMessage(errorString)) { for (auto desired_msg : desired_message_strings_) { if (desired_msg.length() == 0) { // An empty desired_msg string "" indicates a positive test - not expecting an error. // Return true to avoid calling layers/driver with this error. // And don't erase the "" string, so it remains if another error is found. 
result = VK_TRUE; found_expected = true; message_found_ = true; failure_message_strings_.insert(errorString); } else if (errorString.find(desired_msg) != string::npos) { found_expected = true; message_outstanding_count_--; failure_message_strings_.insert(errorString); message_found_ = true; result = VK_TRUE; // We only want one match for each expected error so remove from set here // Since we're about to break the loop it's OK to remove from set we're iterating over desired_message_strings_.erase(desired_msg); break; } } if (!found_expected) { printf("Unexpected: %s\n", msgString); other_messages_.push_back(errorString); } } test_platform_thread_unlock_mutex(&mutex_); return result; } vector<string> GetOtherFailureMsgs() const { return other_messages_; } VkDebugReportFlagsEXT GetMessageFlags() const { return message_flags_; } bool AnyDesiredMsgFound() const { return message_found_; } bool AllDesiredMsgsFound() const { return (0 == message_outstanding_count_); } void SetBailout(bool *bailout) { bailout_ = bailout; } void DumpFailureMsgs() const { vector<string> otherMsgs = GetOtherFailureMsgs(); if (otherMsgs.size()) { cout << "Other error messages logged for this test were:" << endl; for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) { cout << " " << *iter << endl; } } } // Helpers // ExpectSuccess now takes an optional argument allowing a custom combination of debug flags void ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask = VK_DEBUG_REPORT_ERROR_BIT_EXT) { // Match ANY message matching specified type SetDesiredFailureMsg(message_flag_mask, ""); message_flags_ = message_flag_mask; // override mask handling in SetDesired... } void VerifyFound() { // Not seeing the desired message is a failure. /Before/ throwing, dump any other messages. if (!AllDesiredMsgsFound()) { DumpFailureMsgs(); for (auto desired_msg : desired_message_strings_) { ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'"; } } Reset(); } void VerifyNotFound() { // ExpectSuccess() configured us to match anything. Any error is a failure. if (AnyDesiredMsgFound()) { DumpFailureMsgs(); for (auto msg : failure_message_strings_) { ADD_FAILURE() << "Expected to succeed but got error: " << msg; } } Reset(); } private: // TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this // function and its definition.
bool IgnoreMessage(std::string const &msg) const { if (ignore_message_strings_.empty()) { return false; } return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](std::string const &str) { return msg.find(str) != std::string::npos; }) != ignore_message_strings_.end(); } VkFlags message_flags_; std::unordered_set<string> desired_message_strings_; std::unordered_set<string> failure_message_strings_; std::vector<std::string> ignore_message_strings_; vector<string> other_messages_; test_platform_thread_mutex mutex_; bool *bailout_; bool message_found_; int message_outstanding_count_; }; static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject, size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg, void *pUserData) { ErrorMonitor *errMonitor = (ErrorMonitor *)pUserData; if (msgFlags & errMonitor->GetMessageFlags()) { #ifdef _DEBUG char embedded_code_string[2048]; snprintf(embedded_code_string, 2048, "%s [%08x]", pMsg, msgCode); return errMonitor->CheckForDesiredMsg(msgCode, embedded_code_string); #else return errMonitor->CheckForDesiredMsg(msgCode, pMsg); #endif } return VK_FALSE; } class VkLayerTest : public VkRenderFramework { public: void VKTriangleTest(BsoFailSelect failCase); void GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase); void Init(VkPhysicalDeviceFeatures *features = nullptr, VkPhysicalDeviceFeatures2 *features2 = nullptr, const VkCommandPoolCreateFlags flags = 0) { InitFramework(myDbgFunc, m_errorMonitor); InitState(features, features2, flags); } protected: ErrorMonitor *m_errorMonitor; public: ErrorMonitor *Monitor() { return m_errorMonitor; } VkCommandBufferObj *CommandBuffer() { return m_commandBuffer; } protected: bool m_enableWSI; virtual void SetUp() { m_instance_layer_names.clear(); m_instance_extension_names.clear(); m_device_extension_names.clear(); // Add default instance extensions to the list m_instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME); // Use Threading layer first to protect others from // ThreadCommandBufferCollision test m_instance_layer_names.push_back("VK_LAYER_GOOGLE_threading"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_parameter_validation"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker"); m_instance_layer_names.push_back("VK_LAYER_LUNARG_core_validation"); m_instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects"); if (VkTestFramework::m_devsim_layer) { if (InstanceLayerSupported("VK_LAYER_LUNARG_device_simulation")) { m_instance_layer_names.push_back("VK_LAYER_LUNARG_device_simulation"); } else { VkTestFramework::m_devsim_layer = false; printf(" Did not find VK_LAYER_LUNARG_device_simulation layer so it will not be enabled.\n"); } } if (m_enableWSI) { m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME); #ifdef NEED_TO_TEST_THIS_ON_PLATFORM #if defined(VK_USE_PLATFORM_ANDROID_KHR) m_instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_ANDROID_KHR #if defined(VK_USE_PLATFORM_MIR_KHR) m_instance_extension_names.push_back(VK_KHR_MIR_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_MIR_KHR #if defined(VK_USE_PLATFORM_WAYLAND_KHR) m_instance_extension_names.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_WAYLAND_KHR #if 
defined(VK_USE_PLATFORM_WIN32_KHR) m_instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_WIN32_KHR #endif // NEED_TO_TEST_THIS_ON_PLATFORM #if defined(VK_USE_PLATFORM_XCB_KHR) m_instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME); #elif defined(VK_USE_PLATFORM_XLIB_KHR) m_instance_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME); #endif // VK_USE_PLATFORM_XLIB_KHR } this->app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; this->app_info.pNext = NULL; this->app_info.pApplicationName = "layer_tests"; this->app_info.applicationVersion = 1; this->app_info.pEngineName = "unittest"; this->app_info.engineVersion = 1; this->app_info.apiVersion = VK_API_VERSION_1_0; m_errorMonitor = new ErrorMonitor; } bool LoadDeviceProfileLayer( PFN_vkSetPhysicalDeviceFormatPropertiesEXT &fpvkSetPhysicalDeviceFormatPropertiesEXT, PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT &fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT) { // Load required functions fpvkSetPhysicalDeviceFormatPropertiesEXT = (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT"); fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr( instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT"); if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return 0; } return 1; } virtual void TearDown() { // Clean up resources before we reset ShutdownFramework(); delete m_errorMonitor; } VkLayerTest() { m_enableWSI = false; } }; void VkLayerTest::VKTriangleTest(BsoFailSelect failCase) { ASSERT_TRUE(m_device && m_device->initialized()); // VKTriangleTest assumes Init() has finished ASSERT_NO_FATAL_FAILURE(InitViewport()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipelineobj(m_device); pipelineobj.AddDefaultColorAttachment(); pipelineobj.AddShader(&vs); pipelineobj.AddShader(&ps); bool failcase_needs_depth = false; // to mark cases that need depth attachment VkBufferObj index_buffer; switch (failCase) { case BsoFailLineWidth: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_LINE_WIDTH); VkPipelineInputAssemblyStateCreateInfo ia_state = {}; ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST; pipelineobj.SetInputAssembly(&ia_state); break; } case BsoFailDepthBias: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BIAS); VkPipelineRasterizationStateCreateInfo rs_state = {}; rs_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state.depthBiasEnable = VK_TRUE; rs_state.lineWidth = 1.0f; pipelineobj.SetRasterization(&rs_state); break; } case BsoFailViewport: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); break; } case BsoFailScissor: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); break; } case BsoFailBlend: { pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_BLEND_CONSTANTS); VkPipelineColorBlendAttachmentState att_state = {}; att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state.blendEnable = VK_TRUE; pipelineobj.AddColorAttachment(0, att_state); break; } case BsoFailDepthBounds: { failcase_needs_depth = true; 
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BOUNDS); break; } case BsoFailStencilReadMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK); break; } case BsoFailStencilWriteMask: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK); break; } case BsoFailStencilReference: { failcase_needs_depth = true; pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_REFERENCE); break; } case BsoFailIndexBuffer: break; case BsoFailIndexBufferBadSize: case BsoFailIndexBufferBadOffset: case BsoFailIndexBufferBadMapSize: case BsoFailIndexBufferBadMapOffset: { // Create an index buffer for these tests. // There is no need to populate it because we should bail before trying to draw. uint32_t const indices[] = {0}; VkBufferCreateInfo buffer_info = {}; buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_info.size = 1024; buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT; buffer_info.queueFamilyIndexCount = 1; buffer_info.pQueueFamilyIndices = indices; index_buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); } break; case BsoFailCmdClearAttachments: break; case BsoFailNone: break; default: break; } VkDescriptorSetObj descriptorSet(m_device); VkImageView *depth_attachment = nullptr; if (failcase_needs_depth) { m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != VK_FORMAT_UNDEFINED); m_depthStencil->Init(m_device, static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height), m_depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_attachment = m_depthStencil->BindInfo(); } ASSERT_NO_FATAL_FAILURE(InitRenderTarget(1, depth_attachment)); m_commandBuffer->begin(); GenericDrawPreparation(m_commandBuffer, pipelineobj, descriptorSet, failCase); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // render triangle if (failCase == BsoFailIndexBuffer) { // Use DrawIndexed w/o an index buffer bound m_commandBuffer->DrawIndexed(3, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadSize) { // Bind the index buffer and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(513, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadOffset) { // Bind the index buffer and draw one past the end of the buffer using the offset m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(512, 1, 1, 0, 0); } else if (failCase == BsoFailIndexBufferBadMapSize) { // Bind the index buffer at the middle point and draw one too many indices m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(257, 1, 0, 0, 0); } else if (failCase == BsoFailIndexBufferBadMapOffset) { // Bind the index buffer at the middle point and draw one past the end of the buffer m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16); m_commandBuffer->DrawIndexed(256, 1, 1, 0, 0); } else { m_commandBuffer->Draw(3, 1, 0, 0); } if (failCase == BsoFailCmdClearAttachments) { VkClearAttachment color_attachment = {}; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.colorAttachment = 1; // Someone who knew what they were doing would use 0 for the index; VkClearRect clear_rect = {{{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}, 0, 0}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, 
&color_attachment, 1, &clear_rect); } // finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(true); DestroyRenderTarget(); } void VkLayerTest::GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase) { commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color); commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil); // Make sure depthWriteEnable is set so that Depth fail test will work // correctly // Make sure stencilTestEnable is set so that Stencil fail test will work // correctly VkStencilOpState stencil = {}; stencil.failOp = VK_STENCIL_OP_KEEP; stencil.passOp = VK_STENCIL_OP_KEEP; stencil.depthFailOp = VK_STENCIL_OP_KEEP; stencil.compareOp = VK_COMPARE_OP_NEVER; VkPipelineDepthStencilStateCreateInfo ds_ci = {}; ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO; ds_ci.pNext = NULL; ds_ci.depthTestEnable = VK_FALSE; ds_ci.depthWriteEnable = VK_TRUE; ds_ci.depthCompareOp = VK_COMPARE_OP_NEVER; ds_ci.depthBoundsTestEnable = VK_FALSE; if (failCase == BsoFailDepthBounds) { ds_ci.depthBoundsTestEnable = VK_TRUE; ds_ci.maxDepthBounds = 0.0f; ds_ci.minDepthBounds = 0.0f; } ds_ci.stencilTestEnable = VK_TRUE; ds_ci.front = stencil; ds_ci.back = stencil; pipelineobj.SetDepthStencil(&ds_ci); pipelineobj.SetViewport(m_viewports); pipelineobj.SetScissor(m_scissors); descriptorSet.CreateVKDescriptorSet(commandBuffer); VkResult err = pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); vkCmdBindPipeline(commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineobj.handle()); commandBuffer->BindDescriptorSet(descriptorSet); } class VkPositiveLayerTest : public VkLayerTest { public: protected: }; class VkWsiEnabledLayerTest : public VkLayerTest { public: protected: VkWsiEnabledLayerTest() { m_enableWSI = true; } }; class VkBufferTest { public: enum eTestEnFlags { eDoubleDelete, eInvalidDeviceOffset, eInvalidMemoryOffset, eBindNullBuffer, eBindFakeBuffer, eFreeInvalidHandle, eNone, }; enum eTestConditions { eOffsetAlignment = 1 }; static bool GetTestConditionValid(VkDeviceObj *aVulkanDevice, eTestEnFlags aTestFlag, VkBufferUsageFlags aBufferUsage = 0) { if (eInvalidDeviceOffset != aTestFlag && eInvalidMemoryOffset != aTestFlag) { return true; } VkDeviceSize offset_limit = 0; if (eInvalidMemoryOffset == aTestFlag) { VkBuffer vulkanBuffer; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(aVulkanDevice->device(), &buffer_create_info, nullptr, &vulkanBuffer); VkMemoryRequirements memory_reqs = {}; vkGetBufferMemoryRequirements(aVulkanDevice->device(), vulkanBuffer, &memory_reqs); vkDestroyBuffer(aVulkanDevice->device(), vulkanBuffer, nullptr); offset_limit = memory_reqs.alignment; } else if ((VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minTexelBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minUniformBufferOffsetAlignment; } else if (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT & aBufferUsage) { offset_limit = aVulkanDevice->props.limits.minStorageBufferOffsetAlignment; } 
return eOffsetAlignment < offset_limit; } // A constructor which performs validation tests within construction. VkBufferTest(VkDeviceObj *aVulkanDevice, VkBufferUsageFlags aBufferUsage, eTestEnFlags aTestFlag = eNone) : AllocateCurrent(true), BoundCurrent(false), CreateCurrent(false), InvalidDeleteEn(false), VulkanDevice(aVulkanDevice->device()) { if (eBindNullBuffer == aTestFlag || eBindFakeBuffer == aTestFlag) { VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = 1; // fake size -- shouldn't matter for the test memory_allocate_info.memoryTypeIndex = 0; // fake type -- shouldn't matter for the test vkAllocateMemory(VulkanDevice, &memory_allocate_info, nullptr, &VulkanMemory); VulkanBuffer = (aTestFlag == eBindNullBuffer) ? VK_NULL_HANDLE : (VkBuffer)0xCDCDCDCDCDCDCDCD; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, 0); } else { VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 32; buffer_create_info.usage = aBufferUsage; vkCreateBuffer(VulkanDevice, &buffer_create_info, nullptr, &VulkanBuffer); CreateCurrent = true; VkMemoryRequirements memory_requirements; vkGetBufferMemoryRequirements(VulkanDevice, VulkanBuffer, &memory_requirements); VkMemoryAllocateInfo memory_allocate_info = {}; memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_allocate_info.allocationSize = memory_requirements.size + eOffsetAlignment; bool pass = aVulkanDevice->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { CreateCurrent = false; vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); return; } vkAllocateMemory(VulkanDevice, &memory_allocate_info, NULL, &VulkanMemory); // NB: 1 is intentionally an invalid offset value const bool offset_en = eInvalidDeviceOffset == aTestFlag || eInvalidMemoryOffset == aTestFlag; vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, offset_en ? eOffsetAlignment : 0); BoundCurrent = true; InvalidDeleteEn = (eFreeInvalidHandle == aTestFlag); } } ~VkBufferTest() { if (CreateCurrent) { vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } if (AllocateCurrent) { if (InvalidDeleteEn) { union { VkDeviceMemory device_memory; unsigned long long index_access; } bad_index; bad_index.device_memory = VulkanMemory; bad_index.index_access++; vkFreeMemory(VulkanDevice, bad_index.device_memory, nullptr); } vkFreeMemory(VulkanDevice, VulkanMemory, nullptr); } } bool GetBufferCurrent() { return AllocateCurrent && BoundCurrent && CreateCurrent; } const VkBuffer &GetBuffer() { return VulkanBuffer; } void TestDoubleDestroy() { // Destroy the buffer but leave the flag set, which will cause // the buffer to be destroyed again in the destructor. 
vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr); } protected: bool AllocateCurrent; bool BoundCurrent; bool CreateCurrent; bool InvalidDeleteEn; VkBuffer VulkanBuffer; VkDevice VulkanDevice; VkDeviceMemory VulkanMemory; }; class VkVerticesObj { public: VkVerticesObj(VkDeviceObj *aVulkanDevice, unsigned aAttributeCount, unsigned aBindingCount, unsigned aByteStride, VkDeviceSize aVertexCount, const float *aVerticies) : BoundCurrent(false), AttributeCount(aAttributeCount), BindingCount(aBindingCount), BindId(BindIdGenerator), PipelineVertexInputStateCreateInfo(), VulkanMemoryBuffer(aVulkanDevice, static_cast<int>(aByteStride * aVertexCount), reinterpret_cast<const void *>(aVerticies), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) { BindIdGenerator++; // NB: This can wrap w/misuse VertexInputAttributeDescription = new VkVertexInputAttributeDescription[AttributeCount]; VertexInputBindingDescription = new VkVertexInputBindingDescription[BindingCount]; PipelineVertexInputStateCreateInfo.pVertexAttributeDescriptions = VertexInputAttributeDescription; PipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = AttributeCount; PipelineVertexInputStateCreateInfo.pVertexBindingDescriptions = VertexInputBindingDescription; PipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = BindingCount; PipelineVertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; unsigned i = 0; do { VertexInputAttributeDescription[i].binding = BindId; VertexInputAttributeDescription[i].location = i; VertexInputAttributeDescription[i].format = VK_FORMAT_R32G32B32_SFLOAT; VertexInputAttributeDescription[i].offset = sizeof(float) * aByteStride; i++; } while (AttributeCount < i); i = 0; do { VertexInputBindingDescription[i].binding = BindId; VertexInputBindingDescription[i].stride = aByteStride; VertexInputBindingDescription[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX; i++; } while (BindingCount < i); } ~VkVerticesObj() { if (VertexInputAttributeDescription) { delete[] VertexInputAttributeDescription; } if (VertexInputBindingDescription) { delete[] VertexInputBindingDescription; } } bool AddVertexInputToPipe(VkPipelineObj &aPipelineObj) { aPipelineObj.AddVertexInputAttribs(VertexInputAttributeDescription, AttributeCount); aPipelineObj.AddVertexInputBindings(VertexInputBindingDescription, BindingCount); return true; } void BindVertexBuffers(VkCommandBuffer aCommandBuffer, unsigned aOffsetCount = 0, VkDeviceSize *aOffsetList = nullptr) { VkDeviceSize *offsetList; unsigned offsetCount; if (aOffsetCount) { offsetList = aOffsetList; offsetCount = aOffsetCount; } else { offsetList = new VkDeviceSize[1](); offsetCount = 1; } vkCmdBindVertexBuffers(aCommandBuffer, BindId, offsetCount, &VulkanMemoryBuffer.handle(), offsetList); BoundCurrent = true; if (!aOffsetCount) { delete[] offsetList; } } protected: static uint32_t BindIdGenerator; bool BoundCurrent; unsigned AttributeCount; unsigned BindingCount; uint32_t BindId; VkPipelineVertexInputStateCreateInfo PipelineVertexInputStateCreateInfo; VkVertexInputAttributeDescription *VertexInputAttributeDescription; VkVertexInputBindingDescription *VertexInputBindingDescription; VkConstantBufferObj VulkanMemoryBuffer; }; uint32_t VkVerticesObj::BindIdGenerator; struct OneOffDescriptorSet { VkDeviceObj *device_; VkDescriptorPool pool_; VkDescriptorSetLayoutObj layout_; VkDescriptorSet set_; typedef std::vector<VkDescriptorSetLayoutBinding> Bindings; OneOffDescriptorSet(VkDeviceObj *device, const Bindings &bindings) : device_{device}, pool_{}, 
layout_(device, bindings), set_{} { VkResult err; std::vector<VkDescriptorPoolSize> sizes; for (const auto &b : bindings) sizes.push_back({b.descriptorType, std::max(1u, b.descriptorCount)}); VkDescriptorPoolCreateInfo dspci = { VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, 0, 1, uint32_t(sizes.size()), sizes.data()}; err = vkCreateDescriptorPool(device_->handle(), &dspci, nullptr, &pool_); if (err != VK_SUCCESS) return; VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, pool_, 1, &layout_.handle()}; err = vkAllocateDescriptorSets(device_->handle(), &alloc_info, &set_); } ~OneOffDescriptorSet() { // No need to destroy set-- it's going away with the pool. vkDestroyDescriptorPool(device_->handle(), pool_, nullptr); } bool Initialized() { return pool_ != VK_NULL_HANDLE && layout_.initialized() && set_ != VK_NULL_HANDLE; } }; template <typename T> bool IsValidVkStruct(const T &s) { return LvlTypeMap<T>::kSType == s.sType; } // Helper class for tersely creating create pipeline tests // // Designed with minimal error checking to ensure easy error state creation // See OneshotTest for typical usage struct CreatePipelineHelper { public: std::vector<VkDescriptorSetLayoutBinding> dsl_bindings_; std::unique_ptr<OneOffDescriptorSet> descriptor_set_; std::vector<VkPipelineShaderStageCreateInfo> shader_stages_; VkPipelineVertexInputStateCreateInfo vi_ci_ = {}; VkPipelineInputAssemblyStateCreateInfo ia_ci_ = {}; VkPipelineTessellationStateCreateInfo tess_ci_ = {}; VkViewport viewport_ = {}; VkRect2D scissor_ = {}; VkPipelineViewportStateCreateInfo vp_state_ci_ = {}; VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci_ = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci_ = {}; VkPipelineLayoutObj pipeline_layout_; VkPipelineDynamicStateCreateInfo dyn_state_ci_ = {}; VkPipelineRasterizationStateCreateInfo rs_state_ci_ = {}; VkPipelineColorBlendAttachmentState cb_attachments_ = {}; VkPipelineColorBlendStateCreateInfo cb_ci_ = {}; VkGraphicsPipelineCreateInfo gp_ci_ = {}; VkPipelineCacheCreateInfo pc_ci_ = {}; VkPipeline pipeline_ = VK_NULL_HANDLE; VkPipelineCache pipeline_cache_ = VK_NULL_HANDLE; std::unique_ptr<VkShaderObj> vs_; std::unique_ptr<VkShaderObj> fs_; VkLayerTest &layer_test_; CreatePipelineHelper(VkLayerTest &test) : layer_test_(test) {} ~CreatePipelineHelper() { VkDevice device = layer_test_.device(); vkDestroyPipelineCache(device, pipeline_cache_, nullptr); vkDestroyPipeline(device, pipeline_, nullptr); } void InitDescriptorSetInfo() { dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; } void InitInputAndVertexInfo() { vi_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; ia_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; } void InitMultisampleInfo() { pipe_ms_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci_.pNext = nullptr; pipe_ms_state_ci_.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci_.sampleShadingEnable = VK_FALSE; pipe_ms_state_ci_.minSampleShading = 1.0; pipe_ms_state_ci_.pSampleMask = NULL; } void InitPipelineLayoutInfo() { pipeline_layout_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci_.setLayoutCount = 1; // Not really changeable because InitState() sets exactly one pSetLayout pipeline_layout_ci_.pSetLayouts = nullptr; // must bound after it is created } void InitViewportInfo() { viewport_ = 
{0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; scissor_ = {{0, 0}, {64, 64}}; vp_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_ci_.pNext = nullptr; vp_state_ci_.viewportCount = 1; vp_state_ci_.pViewports = &viewport_; // ignored if dynamic vp_state_ci_.scissorCount = 1; vp_state_ci_.pScissors = &scissor_; // ignored if dynamic } void InitDynamicStateInfo() { // Use a "validity" check on the {} initialized structure to detect initialization // during late bind } void InitShaderInfo() { vs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, &layer_test_)); fs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, &layer_test_)); // We shouldn't need a fragment shader but add it to be able to run on more devices shader_stages_ = {vs_->GetStageCreateInfo(), fs_->GetStageCreateInfo()}; } void InitRasterizationInfo() { rs_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci_.pNext = nullptr; rs_state_ci_.flags = 0; rs_state_ci_.depthClampEnable = VK_FALSE; rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; rs_state_ci_.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci_.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci_.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci_.depthBiasEnable = VK_FALSE; rs_state_ci_.lineWidth = 1.0F; } void InitBlendStateInfo() { cb_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb_ci_.logicOpEnable = VK_FALSE; cb_ci_.logicOp = VK_LOGIC_OP_COPY; // ignored if enable is VK_FALSE above cb_ci_.attachmentCount = layer_test_.RenderPassInfo().subpassCount; ASSERT_TRUE(IsValidVkStruct(layer_test_.RenderPassInfo())); cb_ci_.pAttachments = &cb_attachments_; for (int i = 0; i < 4; i++) { cb_ci_.blendConstants[0] = 1.0F; } } void InitGraphicsPipelineInfo() { // Color-only rendering in a subpass with no depth/stencil attachment // Active Pipeline Shader Stages // Vertex Shader // Fragment Shader // Required: Fixed-Function Pipeline Stages // VkPipelineVertexInputStateCreateInfo // VkPipelineInputAssemblyStateCreateInfo // VkPipelineViewportStateCreateInfo // VkPipelineRasterizationStateCreateInfo // VkPipelineMultisampleStateCreateInfo // VkPipelineColorBlendStateCreateInfo gp_ci_.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci_.pNext = nullptr; gp_ci_.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci_.pVertexInputState = &vi_ci_; gp_ci_.pInputAssemblyState = &ia_ci_; gp_ci_.pTessellationState = nullptr; gp_ci_.pViewportState = &vp_state_ci_; gp_ci_.pRasterizationState = &rs_state_ci_; gp_ci_.pMultisampleState = &pipe_ms_state_ci_; gp_ci_.pDepthStencilState = nullptr; gp_ci_.pColorBlendState = &cb_ci_; gp_ci_.pDynamicState = nullptr; gp_ci_.renderPass = layer_test_.renderPass(); } void InitPipelineCacheInfo() { pc_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci_.pNext = nullptr; pc_ci_.flags = 0; pc_ci_.initialDataSize = 0; pc_ci_.pInitialData = nullptr; } // Not called by default during init_info void InitTesselationState() { // TBD -- add shaders and create_info } // TDB -- add control for optional and/or additional initialization void InitInfo() { InitDescriptorSetInfo(); InitInputAndVertexInfo(); InitMultisampleInfo(); InitPipelineLayoutInfo(); InitViewportInfo(); InitDynamicStateInfo(); InitShaderInfo(); InitRasterizationInfo(); InitBlendStateInfo(); InitGraphicsPipelineInfo(); InitPipelineCacheInfo(); } void InitState() { VkResult err; 
descriptor_set_.reset(new OneOffDescriptorSet(layer_test_.DeviceObj(), dsl_bindings_)); ASSERT_TRUE(descriptor_set_->Initialized()); const std::vector<VkPushConstantRange> push_ranges( pipeline_layout_ci_.pPushConstantRanges, pipeline_layout_ci_.pPushConstantRanges + pipeline_layout_ci_.pushConstantRangeCount); pipeline_layout_ = VkPipelineLayoutObj(layer_test_.DeviceObj(), {&descriptor_set_->layout_}, push_ranges); err = vkCreatePipelineCache(layer_test_.device(), &pc_ci_, NULL, &pipeline_cache_); ASSERT_VK_SUCCESS(err); } void LateBindPipelineInfo() { // By value or dynamically located items must be late bound gp_ci_.layout = pipeline_layout_.handle(); gp_ci_.stageCount = shader_stages_.size(); gp_ci_.pStages = shader_stages_.data(); if ((gp_ci_.pTessellationState == nullptr) && IsValidVkStruct(tess_ci_)) { gp_ci_.pTessellationState = &tess_ci_; } if ((gp_ci_.pDynamicState == nullptr) && IsValidVkStruct(dyn_state_ci_)) { gp_ci_.pDynamicState = &dyn_state_ci_; } } VkResult CreateGraphicsPipeline(bool implicit_destroy = true, bool do_late_bind = true) { VkResult err; if (do_late_bind) { LateBindPipelineInfo(); } if (implicit_destroy && (pipeline_ != VK_NULL_HANDLE)) { vkDestroyPipeline(layer_test_.device(), pipeline_, nullptr); pipeline_ = VK_NULL_HANDLE; } err = vkCreateGraphicsPipelines(layer_test_.device(), pipeline_cache_, 1, &gp_ci_, NULL, &pipeline_); return err; } // Helper function to create a simple test case (positive or negative) // // info_override can be any callable that takes a CreatePipelineHeper & // flags, error can be any args accepted by "SetDesiredFailure". template <typename Test, typename OverrideFunc, typename Error> static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, const std::vector<Error> &errors, bool positive_test = false) { CreatePipelineHelper helper(test); helper.InitInfo(); info_override(helper); helper.InitState(); for (const auto &error : errors) test.Monitor()->SetDesiredFailureMsg(flags, error); helper.CreateGraphicsPipeline(); if (positive_test) { test.Monitor()->VerifyNotFound(); } else { test.Monitor()->VerifyFound(); } } template <typename Test, typename OverrideFunc, typename Error> static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, Error error, bool positive_test = false) { OneshotTest(test, info_override, flags, std::vector<Error>(1, error), positive_test); } }; namespace chain_util { template <typename T> T Init(const void *pnext_in = nullptr) { T pnext_obj = {}; pnext_obj.sType = LvlTypeMap<T>::kSType; pnext_obj.pNext = pnext_in; return pnext_obj; } class ExtensionChain { const void *head_ = nullptr; typedef std::function<bool(const char *)> AddIfFunction; AddIfFunction add_if_; typedef std::vector<const char *> List; List *list_; public: template <typename F> ExtensionChain(F &add_if, List *list) : add_if_(add_if), list_(list) {} template <typename T> void Add(const char *name, T &obj) { if (add_if_(name)) { if (list_) { list_->push_back(name); } obj.pNext = head_; head_ = &obj; } } const void *Head() const { return head_; } }; } // namespace chain_util // ******************************************************************************************************************** // ******************************************************************************************************************** // ******************************************************************************************************************** // 
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************

TEST_F(VkLayerTest, RequiredParameter) {
    TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFeatures specified as NULL");
    // Specify NULL for a pointer to a handle
    // Expected to trigger an error with
    // parameter_validation::validate_required_pointer
    vkGetPhysicalDeviceFeatures(gpu(), NULL);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "required parameter pQueueFamilyPropertyCount specified as NULL");
    // Specify NULL for pointer to array count
    // Expected to trigger an error with parameter_validation::validate_array
    vkGetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
    // Specify 0 for a required array count
    // Expected to trigger an error with parameter_validation::validate_array
    VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
    m_commandBuffer->SetViewport(0, 0, &viewport);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
    // Specify NULL for a required array
    // Expected to trigger an error with parameter_validation::validate_array
    m_commandBuffer->SetViewport(0, 1, NULL);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter memory specified as VK_NULL_HANDLE");
    // Specify VK_NULL_HANDLE for a required handle
    // Expected to trigger an error with
    // parameter_validation::validate_required_handle
    vkUnmapMemory(device(), VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "required parameter pFences[0] specified as VK_NULL_HANDLE");
    // Specify VK_NULL_HANDLE for a required handle array entry
    // Expected to trigger an error with
    // parameter_validation::validate_required_handle_array
    VkFence fence = VK_NULL_HANDLE;
    vkResetFences(device(), 1, &fence);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pAllocateInfo specified as NULL");
    // Specify NULL for a required struct pointer
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device(), NULL, NULL, &memory);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of faceMask must not be 0");
    // Specify 0 for a required VkFlags parameter
    // Expected to trigger an error with parameter_validation::validate_flags
    m_commandBuffer->SetStencilReference(0, 0);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0");
    // Specify 0 for a required VkFlags array entry
    // Expected to trigger an error with
    // parameter_validation::validate_flags_array
    VkSemaphore semaphore = VK_NULL_HANDLE;
    VkPipelineStageFlags stageFlags = 0;
    VkSubmitInfo submitInfo = {};
    submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submitInfo.waitSemaphoreCount = 1;
    submitInfo.pWaitSemaphores = &semaphore;
    submitInfo.pWaitDstStageMask = &stageFlags;
    vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ReservedParameter) {
    TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " must be 0");
    // Specify a non-zero value for a reserved VkFlags parameter
    // Expected to trigger an error with
    // parameter_validation::validate_reserved_flags
    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_info.flags = 1;
    vkCreateEvent(device(), &event_info, NULL, &event_handle);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DebugMarkerNameTest) {
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
    } else {
        printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT =
        (PFN_vkDebugMarkerSetObjectNameEXT)vkGetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT");
    if (!(fpvkDebugMarkerSetObjectNameEXT)) {
        printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix);
        return;
    }

    VkEvent event_handle = VK_NULL_HANDLE;
    VkEventCreateInfo event_info = {};
    event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(device(), &event_info, NULL, &event_handle);

    VkDebugMarkerObjectNameInfoEXT name_info = {};
    name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
    name_info.pNext = nullptr;
    name_info.object = (uint64_t)event_handle;
    name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT;
    name_info.pObjectName = "UnimaginablyImprobableString";
    fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);

    m_commandBuffer->begin();
    vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
    m_commandBuffer->end();

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UnimaginablyImprobableString");
    vkDestroyEvent(m_device->device(), event_handle, NULL);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
}
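// Editorial sketch (not a framework API): the SetDesiredFailureMsg / trigger / VerifyFound
// pattern used by DebugMarkerNameTest above, and by nearly every test below, wrapped as a
// generic helper. ExpectError is an illustrative name; the real suite spells the steps out inline.
template <typename Trigger>
static void ExpectError(ErrorMonitor *monitor, const char *expected_msg, Trigger &&trigger) {
    monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, expected_msg);
    trigger();               // the Vulkan call that should fail validation
    monitor->VerifyFound();  // fails the test if the layer did not emit expected_msg
}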
TEST_F(VkLayerTest, InvalidStructSType) {
    TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pAllocateInfo->sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type
    VkMemoryAllocateInfo alloc_info = {};
    VkDeviceMemory memory = VK_NULL_HANDLE;
    vkAllocateMemory(device(), &alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pSubmits[0].sType must be");
    // Zero struct memory, effectively setting sType to
    // VK_STRUCTURE_TYPE_APPLICATION_INFO
    // Expected to trigger an error with
    // parameter_validation::validate_struct_type_array
    VkSubmitInfo submit_info = {};
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidStructPNext) {
    TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "value of pCreateInfo->pNext must be NULL");
    // Set VkMemoryAllocateInfo::pNext to a non-NULL value, when pNext must be NULL.
    // Need to pick a function that has no allowed pNext structure types.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkEvent event = VK_NULL_HANDLE;
    VkEventCreateInfo event_alloc_info = {};
    // Zero-initialization will provide the correct sType
    VkApplicationInfo app_info = {};
    event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    event_alloc_info.pNext = &app_info;
    vkCreateEvent(device(), &event_alloc_info, NULL, &event);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         " chain includes a structure with unexpected VkStructureType ");
    // Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
    // a function that has allowed pNext structure types and specify
    // a structure type that is not allowed.
    // Expected to trigger an error with parameter_validation::validate_struct_pnext
    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkMemoryAllocateInfo memory_alloc_info = {};
    memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_alloc_info.pNext = &app_info;
    vkAllocateMemory(device(), &memory_alloc_info, NULL, &memory);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "does not fall within the begin..end range of the core VkFormat enumeration tokens");
    // Specify an invalid VkFormat value
    // Expected to trigger an error with
    // parameter_validation::validate_ranged_enum
    VkFormatProperties format_properties;
    vkGetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags bitmask value
    // Expected to trigger an error with parameter_validation::validate_flags
    VkImageFormatProperties image_format_properties;
    vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
                                             static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
    // Specify an invalid VkFlags array entry
    // Expected to trigger an error with
    // parameter_validation::validate_flags_array
    VkSemaphore semaphore = VK_NULL_HANDLE;
    VkPipelineStageFlags stage_flags = static_cast<VkPipelineStageFlags>(1 << 25);
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.waitSemaphoreCount = 1;
    submit_info.pWaitSemaphores = &semaphore;
    submit_info.pWaitDstStageMask = &stage_flags;
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
    // Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
    } else {
        printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE");
    // Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;

    // Not VK_TRUE or VK_FALSE
    sampler_info.anisotropyEnable = 3;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MirrorClampToEdgeNotEnabled) {
    TEST_DESCRIPTION("Validation should catch using MIRROR_CLAMP_TO_EDGE addressing mode if the extension is not enabled.");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-addressModeU-01079");
    VkSampler sampler = VK_NULL_HANDLE;
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // Set the modes to cause the error
    sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
    vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
}
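// Editorial sketch: SafeSaneSamplerCreateInfo() above is a framework helper; a minimal
// stand-in with spec-legal defaults would look roughly like this (illustrative only --
// the real helper's exact defaults may differ).
static VkSamplerCreateInfo ExampleSaneSamplerCreateInfo() {
    VkSamplerCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
    ci.magFilter = VK_FILTER_NEAREST;
    ci.minFilter = VK_FILTER_NEAREST;
    ci.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
    ci.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    ci.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    ci.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
    ci.maxAnisotropy = 1.0f;  // ignored unless anisotropyEnable is VK_TRUE, but keep it legal
    ci.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
    return ci;
}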
TEST_F(VkLayerTest, AnisotropyFeatureDisabled) {
    TEST_DESCRIPTION("Validation should check anisotropy parameters are correct with samplerAnisotropy disabled.");

    // Determine if required device features are available
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    device_features.samplerAnisotropy = VK_FALSE;  // force anisotropy off
    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-anisotropyEnable-01070");
    VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
    // With samplerAnisotropy disabled, the sampler must not enable anisotropy.
    sampler_info.anisotropyEnable = VK_TRUE;
    VkSampler sampler = VK_NULL_HANDLE;
    VkResult err;
    err = vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == err) {
        vkDestroySampler(m_device->device(), sampler, NULL);
    }
    sampler = VK_NULL_HANDLE;
}

TEST_F(VkLayerTest, AnisotropyFeatureEnabled) {
    TEST_DESCRIPTION("Validation must check several conditions that apply only when Anisotropy is enabled.");

    // Determine if required device features are available
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));

    // These tests require that the device support anisotropic filtering
    if (VK_TRUE != device_features.samplerAnisotropy) {
        printf("%s Test requires unsupported samplerAnisotropy feature. Skipped.\n", kSkipPrefix);
        return;
    }

    bool cubic_support = false;
    if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) {
        m_device_extension_names.push_back("VK_IMG_filter_cubic");
        cubic_support = true;
    }

    VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo();
    sampler_info_ref.anisotropyEnable = VK_TRUE;
    VkSamplerCreateInfo sampler_info = sampler_info_ref;
    ASSERT_NO_FATAL_FAILURE(InitState());

    auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void {
        VkResult err;
        VkSampler sampler = VK_NULL_HANDLE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code);
        err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler);
        m_errorMonitor->VerifyFound();
        if (VK_SUCCESS == err) {
            vkDestroySampler(m_device->device(), sampler, NULL);
        }
    };

    // maxAnisotropy out-of-bounds low.
    sampler_info.maxAnisotropy = NearestSmaller(1.0F);
    do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info);
    sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;

    // maxAnisotropy out-of-bounds high.
    sampler_info.maxAnisotropy = NearestGreater(m_device->phy().properties().limits.maxSamplerAnisotropy);
    do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info);
    sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;

    // Both anisotropy and unnormalized coords enabled
    sampler_info.unnormalizedCoordinates = VK_TRUE;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076", &sampler_info);
    sampler_info.unnormalizedCoordinates = sampler_info_ref.unnormalizedCoordinates;

    // Both anisotropy and cubic filtering enabled
    if (cubic_support) {
        sampler_info.minFilter = VK_FILTER_CUBIC_IMG;
        do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info);
        sampler_info.minFilter = sampler_info_ref.minFilter;

        sampler_info.magFilter = VK_FILTER_CUBIC_IMG;
        do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info);
        sampler_info.magFilter = sampler_info_ref.magFilter;
    } else {
        printf("%s Test requires unsupported extension \"VK_IMG_filter_cubic\". Skipped.\n", kSkipPrefix);
    }
}
TEST_F(VkLayerTest, UnnormalizedCoordinatesEnabled) {
    TEST_DESCRIPTION("Validate restrictions on sampler parameters when unnormalizedCoordinates is true.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo();
    sampler_info_ref.unnormalizedCoordinates = VK_TRUE;
    sampler_info_ref.minLod = 0.0f;
    sampler_info_ref.maxLod = 0.0f;
    VkSamplerCreateInfo sampler_info = sampler_info_ref;
    ASSERT_NO_FATAL_FAILURE(InitState());

    auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void {
        VkResult err;
        VkSampler sampler = VK_NULL_HANDLE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code);
        err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler);
        m_errorMonitor->VerifyFound();
        if (VK_SUCCESS == err) {
            vkDestroySampler(m_device->device(), sampler, NULL);
        }
    };

    // min and mag filters must be the same
    sampler_info.minFilter = VK_FILTER_NEAREST;
    sampler_info.magFilter = VK_FILTER_LINEAR;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info);
    std::swap(sampler_info.minFilter, sampler_info.magFilter);
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01072", &sampler_info);
    sampler_info = sampler_info_ref;

    // mipmapMode must be NEAREST
    sampler_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01073", &sampler_info);
    sampler_info = sampler_info_ref;

    // minLod and maxLod must be zero
    sampler_info.maxLod = 3.14159f;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info);
    sampler_info.minLod = 2.71828f;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01074", &sampler_info);
    sampler_info = sampler_info_ref;

    // addressModeU and addressModeV must both be CLAMP_TO_EDGE or CLAMP_TO_BORDER
    // checks all 12 invalid combinations out of 16 total combinations
    const std::array<VkSamplerAddressMode, 4> kAddressModes = {{
        VK_SAMPLER_ADDRESS_MODE_REPEAT,
        VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
        VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
        VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
    }};
    for (const auto umode : kAddressModes) {
        for (const auto vmode : kAddressModes) {
            if ((umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && umode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) ||
                (vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE && vmode != VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER)) {
                sampler_info.addressModeU = umode;
                sampler_info.addressModeV = vmode;
                do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01075", &sampler_info);
            }
        }
    }
    sampler_info = sampler_info_ref;

    // VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076 is tested in AnisotropyFeatureEnabled above
    // Since it requires checking/enabling the anisotropic filtering feature, it's easier to do it
    // with the other anisotropic tests.

    // compareEnable must be VK_FALSE
    sampler_info.compareEnable = VK_TRUE;
    do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01077", &sampler_info);
    sampler_info = sampler_info_ref;
}

TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
    ASSERT_NO_FATAL_FAILURE(Init());

    // Specify MAX_ENUM
    VkFormatProperties format_properties;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range");
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UpdateBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdUpdateBuffer");
    uint32_t updateData[] = {1, 2, 3, 4, 5, 6, 7, 8};

    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj buffer;
    buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);

    m_commandBuffer->begin();
    // Introduce failure by using dstOffset that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 1, 4, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using dataSize that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, 6, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using a negative dataSize, which wraps to a huge unsigned value
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)-44, updateData);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using dataSize that is > 65536
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero and less than or equal to 65536");
    m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)80000, updateData);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, FillBufferAlignment) {
    TEST_DESCRIPTION("Check alignment parameters for vkCmdFillBuffer");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    VkBufferObj buffer;
    buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);

    m_commandBuffer->begin();

    // Introduce failure by using dstOffset that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(buffer.handle(), 1, 4, 0x11111111);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using size that is not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
    m_commandBuffer->FillBuffer(buffer.handle(), 0, 6, 0x11111111);
    m_errorMonitor->VerifyFound();

    // Introduce failure by using size that is zero
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero");
    m_commandBuffer->FillBuffer(buffer.handle(), 0, 0, 0x11111111);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, PSOPolygonModeInvalid) {
    TEST_DESCRIPTION("Attempt to use a non-solid polygon fill mode in a pipeline when this feature is not enabled.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Artificially disable support for non-solid fill modes
    features.fillModeNonSolid = VK_FALSE;
    // The sacrificial device object
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features);

    VkRenderpassObj render_pass(&test_device);

    const VkPipelineLayoutObj pipeline_layout(&test_device);

    VkPipelineRasterizationStateCreateInfo rs_ci = {};
    rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
    rs_ci.pNext = nullptr;
    rs_ci.lineWidth = 1.0f;
    rs_ci.rasterizerDiscardEnable = VK_TRUE;

    VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    // Set polygonMode to unsupported value POINT, should fail
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE");
    {
        VkPipelineObj pipe(&test_device);
        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        pipe.AddDefaultColorAttachment();

        // Introduce failure by setting unsupported polygon mode
        rs_ci.polygonMode = VK_POLYGON_MODE_POINT;
        pipe.SetRasterization(&rs_ci);
        pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    }
    m_errorMonitor->VerifyFound();

    // Try again with polygonMode=LINE, should fail
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE");
    {
        VkPipelineObj pipe(&test_device);
        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        pipe.AddDefaultColorAttachment();

        // Introduce failure by setting unsupported polygon mode
        rs_ci.polygonMode = VK_POLYGON_MODE_LINE;
        pipe.SetRasterization(&rs_ci);
        pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    }
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, SparseBindingImageBufferCreate) {
    TEST_DESCRIPTION("Create buffer/image with sparse attributes but without the sparse_binding bit set");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkBuffer buffer;
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    buf_info.size = 2048;
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    if (m_device->phy().features().sparseResidencyBuffer) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918");
        buf_info.flags = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
        vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
        m_errorMonitor->VerifyFound();
    } else {
        printf("%s Test requires unsupported sparseResidencyBuffer feature. Skipped.\n", kSkipPrefix);
        return;
    }

    if (m_device->phy().features().sparseResidencyAliased) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918");
        buf_info.flags = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
        vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
        m_errorMonitor->VerifyFound();
    } else {
        printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix);
        return;
    }

    VkImage image;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 512;
    image_create_info.extent.height = 64;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

    if (m_device->phy().features().sparseResidencyImage2D) {
        image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987");
        vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
        m_errorMonitor->VerifyFound();
    } else {
        printf("%s Test requires unsupported sparseResidencyImage2D feature. Skipped.\n", kSkipPrefix);
        return;
    }

    if (m_device->phy().features().sparseResidencyAliased) {
        image_create_info.flags = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987");
        vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
        m_errorMonitor->VerifyFound();
    } else {
        printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix);
        return;
    }
}
Skipped.\n", kSkipPrefix); return; } VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_1D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 512; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 1D image w/ sparse residency is an error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00970"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 2D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.extent.height = 64; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00971"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } // 3D image w/ sparse residency when feature isn't available image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.extent.depth = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00972"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedSamples) { TEST_DESCRIPTION("Create images with sparse residency with unsupported tiling or sample counts"); // Determine which device feature are available VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // These tests require that the device support sparse residency for 2D images if (VK_TRUE != device_features.sparseResidencyImage2D) { printf("%s Test requires unsupported SparseResidencyImage2D feature. 
Skipped.\n", kSkipPrefix); return; } // Mask out device features we don't want and initialize device state device_features.sparseResidency2Samples = VK_FALSE; device_features.sparseResidency4Samples = VK_FALSE; device_features.sparseResidency8Samples = VK_FALSE; device_features.sparseResidency16Samples = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&device_features)); VkImage image = VK_NULL_HANDLE; VkResult result = VK_RESULT_MAX_ENUM; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_BINDING_BIT; // 2D image w/ sparse residency and linear tiling is an error m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image tiling of VK_IMAGE_TILING_LINEAR is not supported"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Multi-sample image w/ sparse residency when feature isn't available (4 flavors) image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00973"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00974"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_8_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00975"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } image_create_info.samples = VK_SAMPLE_COUNT_16_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00976"); result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); m_errorMonitor->VerifyFound(); if (VK_SUCCESS == result) { vkDestroyImage(m_device->device(), image, NULL); image = VK_NULL_HANDLE; } } TEST_F(VkLayerTest, InvalidMemoryAliasing) { TEST_DESCRIPTION( "Create a buffer and image, allocate memory, and bind the buffer and image 
to memory such that they will alias."); VkResult err; bool pass; ASSERT_NO_FATAL_FAILURE(Init()); VkBuffer buffer, buffer2; VkImage image; VkImage image2; VkDeviceMemory mem; // buffer will be bound first VkDeviceMemory mem_img; // image bound first VkMemoryRequirements buff_mem_reqs, img_mem_reqs; VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2; VkBufferCreateInfo buf_info = {}; buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buf_info.pNext = NULL; buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buf_info.size = 256; buf_info.queueFamilyIndexCount = 0; buf_info.pQueueFamilyIndices = NULL; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; buf_info.flags = 0; err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; // Image tiling must be optimal to trigger error when aliasing linear buffer image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.memoryTypeIndex = 0; // Ensure memory is big enough for both bindings alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size; pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); vkDestroyImage(m_device->device(), image, NULL); vkDestroyImage(m_device->device(), image2, NULL); return; } err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x"); // VALIDATION FAILURE due to image mapping overlapping buffer mapping err = vkBindImageMemory(m_device->device(), image, mem, 0); m_errorMonitor->VerifyFound(); // Now correctly bind image2 to second mem allocation before incorrectly // aliasing buffer2 err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image2, mem_img, 0); 
TEST_F(VkLayerTest, InvalidMemoryAliasing) {
    TEST_DESCRIPTION(
        "Create a buffer and image, allocate memory, and bind the buffer and image to memory such that they will alias.");
    VkResult err;
    bool pass;
    ASSERT_NO_FATAL_FAILURE(Init());

    VkBuffer buffer, buffer2;
    VkImage image;
    VkImage image2;
    VkDeviceMemory mem;      // buffer will be bound first
    VkDeviceMemory mem_img;  // image bound first
    VkMemoryRequirements buff_mem_reqs, img_mem_reqs;
    VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2;

    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = 256;
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs);

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
    image_create_info.extent.width = 64;
    image_create_info.extent.height = 64;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    // Image tiling must be optimal to trigger error when aliasing linear buffer
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.queueFamilyIndexCount = 0;
    image_create_info.pQueueFamilyIndices = NULL;
    image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    image_create_info.flags = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);
    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    // Ensure memory is big enough for both bindings
    alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size;
    pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info,
                                           VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    if (!pass) {
        printf("%s Failed to set memory type.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        vkDestroyImage(m_device->device(), image, NULL);
        vkDestroyImage(m_device->device(), image2, NULL);
        return;
    }
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);
    err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x");
    // VALIDATION FAILURE due to image mapping overlapping buffer mapping
    err = vkBindImageMemory(m_device->device(), image, mem, 0);
    m_errorMonitor->VerifyFound();

    // Now correctly bind image2 to second mem allocation before incorrectly
    // aliasing buffer2
    err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2);
    ASSERT_VK_SUCCESS(err);
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), image2, mem_img, 0);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is aliased with non-linear image 0x");
    vkGetBufferMemoryRequirements(m_device->device(), buffer2, &buff_mem_reqs2);
    err = vkBindBufferMemory(m_device->device(), buffer2, mem_img, 0);
    m_errorMonitor->VerifyFound();

    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkDestroyBuffer(m_device->device(), buffer2, NULL);
    vkDestroyImage(m_device->device(), image, NULL);
    vkDestroyImage(m_device->device(), image2, NULL);
    vkFreeMemory(m_device->device(), mem, NULL);
    vkFreeMemory(m_device->device(), mem_img, NULL);
}
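// Editorial sketch: the positive counterpart of the aliasing warnings above -- to place a
// buffer and an image in one allocation without aliasing, the image offset must start past
// the buffer, rounded up to both the image alignment and bufferImageGranularity (assumes
// <algorithm> for std::max; the helper name is illustrative).
static VkDeviceSize ExampleNonAliasingImageOffset(const VkMemoryRequirements &buf_reqs, const VkMemoryRequirements &img_reqs,
                                                  VkDeviceSize buffer_image_granularity) {
    const VkDeviceSize align = std::max(img_reqs.alignment, buffer_image_granularity);  // both are powers of two
    return (buf_reqs.size + align - 1) & ~(align - 1);  // round the end of the buffer up to the required alignment
}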
TEST_F(VkLayerTest, InvalidMemoryMapping) {
    TEST_DESCRIPTION("Attempt to map memory in a number of incorrect ways");
    VkResult err;
    bool pass;
    ASSERT_NO_FATAL_FAILURE(Init());

    VkBuffer buffer;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;

    const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;

    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = 256;
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;

    // Allocate a fixed-size chunk so the overstep cases below are well defined
    static const VkDeviceSize allocation_size = 0x10000;
    alloc_info.allocationSize = allocation_size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        printf("%s Failed to set memory type.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    uint8_t *pData;
    // Attempt to map memory size 0 is invalid
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory range of size zero");
    err = vkMapMemory(m_device->device(), mem, 0, 0, 0, (void **)&pData);
    m_errorMonitor->VerifyFound();
    // Map memory twice
    err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VkMapMemory: Attempting to map memory on an already-mapped object ");
    err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
    m_errorMonitor->VerifyFound();

    // Unmap the memory to avoid re-map error
    vkUnmapMemory(m_device->device(), mem);
    // overstep allocation with VK_WHOLE_SIZE
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " with size of VK_WHOLE_SIZE oversteps total array size 0x");
    err = vkMapMemory(m_device->device(), mem, allocation_size + 1, VK_WHOLE_SIZE, 0, (void **)&pData);
    m_errorMonitor->VerifyFound();
    // overstep allocation w/o VK_WHOLE_SIZE
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " oversteps total array size 0x");
    err = vkMapMemory(m_device->device(), mem, 1, allocation_size, 0, (void **)&pData);
    m_errorMonitor->VerifyFound();
    // Now error due to unmapping memory that's not mapped
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Unmapping Memory without memory being mapped: ");
    vkUnmapMemory(m_device->device(), mem);
    m_errorMonitor->VerifyFound();

    // Now map memory and cause errors due to flushing invalid ranges
    err = vkMapMemory(m_device->device(), mem, 4 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    VkMappedMemoryRange mmr = {};
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = atom_size;  // Error b/c offset less than offset of mapped mem
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685");
    vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    m_errorMonitor->VerifyFound();

    // Now flush range that oversteps mapped range
    vkUnmapMemory(m_device->device(), mem);
    err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.offset = atom_size;
    mmr.size = 4 * atom_size;  // Flushing bounds exceed mapped bounds
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685");
    vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    m_errorMonitor->VerifyFound();

    // Now flush range with VK_WHOLE_SIZE that oversteps offset
    vkUnmapMemory(m_device->device(), mem);
    err = vkMapMemory(m_device->device(), mem, 2 * atom_size, 4 * atom_size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.offset = atom_size;
    mmr.size = VK_WHOLE_SIZE;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00686");
    vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    m_errorMonitor->VerifyFound();

    // Some platforms have an atom size of 1 which makes the test meaningless
    if (atom_size > 3) {
        // Now with an offset NOT a multiple of the device limit
        vkUnmapMemory(m_device->device(), mem);
        err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
        ASSERT_VK_SUCCESS(err);
        mmr.offset = 3;  // Not a multiple of atom_size
        mmr.size = VK_WHOLE_SIZE;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-offset-00687");
        vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
        m_errorMonitor->VerifyFound();

        // Now with a size NOT a multiple of the device limit
        vkUnmapMemory(m_device->device(), mem);
        err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
        ASSERT_VK_SUCCESS(err);
        mmr.offset = atom_size;
        mmr.size = 2 * atom_size + 1;  // Not a multiple of atom_size
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-01390");
        vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
        m_errorMonitor->VerifyFound();
    }

    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                           VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    if (!pass) {
        printf("%s Failed to set memory type.\n", kSkipPrefix);
        vkFreeMemory(m_device->device(), mem, NULL);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    // TODO : If we can get HOST_VISIBLE w/o HOST_COHERENT we can test cases of
    // MEMTRACK_INVALID_MAP in validateAndCopyNoncoherentMemoryToDriver()

    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkFreeMemory(m_device->device(), mem, NULL);
}
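// Editorial sketch: the flush-range rules exercised above require offset/size to be multiples
// of nonCoherentAtomSize; rounding a byte range outward to that atom size looks like this
// (illustrative helpers, not a framework API; atom sizes are powers of two).
static VkDeviceSize ExampleAlignDown(VkDeviceSize value, VkDeviceSize atom) { return value & ~(atom - 1); }
static VkDeviceSize ExampleAlignUp(VkDeviceSize value, VkDeviceSize atom) { return (value + atom - 1) & ~(atom - 1); }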
TEST_F(VkLayerTest, MapMemWithoutHostVisibleBit) {
    TEST_DESCRIPTION("Allocate memory that is not mappable and then attempt to map it.");
    VkResult err;
    bool pass;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMapMemory-memory-00682");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 1024;

    pass = m_device->phy().set_memory_type(0xFFFFFFFF, &mem_alloc, 0, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        // If we can't find any unmappable memory this test doesn't
        // make sense
        printf("%s No unmappable memory types found, skipping test\n", kSkipPrefix);
        return;
    }

    VkDeviceMemory mem;
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    void *mappedAddress = NULL;
    err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, &mappedAddress);
    m_errorMonitor->VerifyFound();

    vkFreeMemory(m_device->device(), mem, NULL);
}

TEST_F(VkLayerTest, RebindMemory) {
    VkResult err;
    bool pass;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which has already been bound to mem object");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Create an image, allocate memory, free it, and then try to bind it
    VkImage image;
    VkDeviceMemory mem1;
    VkDeviceMemory mem2;
    VkMemoryRequirements mem_reqs;

    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 0;
    mem_alloc.memoryTypeIndex = 0;

    // Introduce failure, do NOT set memProps to
    // VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
    mem_alloc.memoryTypeIndex = 1;
    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);

    mem_alloc.allocationSize = mem_reqs.size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);

    // allocate 2 memory objects
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem1);
    ASSERT_VK_SUCCESS(err);
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem2);
    ASSERT_VK_SUCCESS(err);

    // Bind first memory object to Image object
    err = vkBindImageMemory(m_device->device(), image, mem1, 0);
    ASSERT_VK_SUCCESS(err);

    // Introduce validation failure, try to bind a different memory object to
    // the same image object
    err = vkBindImageMemory(m_device->device(), image, mem2, 0);
    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), image, NULL);
    vkFreeMemory(m_device->device(), mem1, NULL);
    vkFreeMemory(m_device->device(), mem2, NULL);
}

TEST_F(VkLayerTest, SubmitSignaledFence) {
    vk_testing::Fence testFence;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "submitted in SIGNALED state. Fences must be reset before being submitted");

    VkFenceCreateInfo fenceInfo = {};
    fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fenceInfo.pNext = NULL;
    fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_commandBuffer->begin();
    m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
    m_commandBuffer->end();

    testFence.init(*m_device, fenceInfo);

    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    vkQueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle());
    vkQueueWaitIdle(m_device->m_queue);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidUsageBits) {
    TEST_DESCRIPTION(
        "Specify wrong usage for image then create conflicting view of image. Initialize buffer with wrong usage then perform "
        "copy expecting errors from both the image and the buffer (2 calls).");

    ASSERT_NO_FATAL_FAILURE(Init());
    auto format = FindSupportedDepthStencilFormat(gpu());
    if (!format) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }

    VkImageObj image(m_device);
    // Initialize image with transfer source usage
    image.Init(128, 128, 1, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView dsv;
    VkImageViewCreateInfo dsvci = {};
    dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    dsvci.image = image.handle();
    dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    dsvci.format = format;
    dsvci.subresourceRange.layerCount = 1;
    dsvci.subresourceRange.baseMipLevel = 0;
    dsvci.subresourceRange.levelCount = 1;
    dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;

    // Create a view with depth / stencil aspect for image with different usage
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid usage flag for Image ");
    vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv);
    m_errorMonitor->VerifyFound();

    // Initialize buffer with TRANSFER_DST usage
    VkBufferObj buffer;
    VkMemoryPropertyFlags reqs = 0;
    buffer.init_as_dst(*m_device, 128 * 128, reqs);
    VkBufferImageCopy region = {};
    region.bufferRowLength = 128;
    region.bufferImageHeight = 128;
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
    region.imageSubresource.layerCount = 1;
    region.imageExtent.height = 16;
    region.imageExtent.width = 16;
    region.imageExtent.depth = 1;

    // Buffer usage not set to TRANSFER_SRC and image usage not set to TRANSFER_DST
    m_commandBuffer->begin();

    // two separate errors from this call:
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImage-00177");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174");

    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                           &region);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, LeakAnObject) {
    VkResult err;

    TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence.");

    // Note that we have to create a new device since destroying the
    // framework's device causes Teardown() to fail and just calling Teardown
    // will destroy the errorMonitor.

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has not been destroyed.");

    ASSERT_NO_FATAL_FAILURE(Init());

    vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);

    // The sacrificial device object
    VkDevice testDevice;
    VkDeviceCreateInfo device_create_info = {};
    auto features = m_device->phy().features();
    device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_create_info.pNext = NULL;
    device_create_info.queueCreateInfoCount = queue_info.size();
    device_create_info.pQueueCreateInfos = queue_info.data();
    device_create_info.enabledLayerCount = 0;
    device_create_info.ppEnabledLayerNames = NULL;
    device_create_info.pEnabledFeatures = &features;
    err = vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice);
    ASSERT_VK_SUCCESS(err);

    VkFence fence;
    VkFenceCreateInfo fence_create_info = {};
    fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    fence_create_info.pNext = NULL;
    fence_create_info.flags = 0;
    err = vkCreateFence(testDevice, &fence_create_info, NULL, &fence);
    ASSERT_VK_SUCCESS(err);

    // Induce failure by not calling vkDestroyFence
    vkDestroyDevice(testDevice, NULL);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCommandPoolConsistency) {
    TEST_DESCRIPTION("Allocate command buffers from one command pool and attempt to delete them from another.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeCommandBuffers is attempting to free Command Buffer");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkCommandPool command_pool_one;
    VkCommandPool command_pool_two;

    VkCommandPoolCreateInfo pool_create_info{};
    pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
    pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;

    vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_one);
    vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_two);

    VkCommandBuffer cb;
    VkCommandBufferAllocateInfo command_buffer_allocate_info{};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = command_pool_one;
    command_buffer_allocate_info.commandBufferCount = 1;
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &cb);

    vkFreeCommandBuffers(m_device->device(), command_pool_two, 1, &cb);
    m_errorMonitor->VerifyFound();

    vkDestroyCommandPool(m_device->device(), command_pool_one, NULL);
    vkDestroyCommandPool(m_device->device(), command_pool_two, NULL);
}
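// Editorial sketch: the valid pairing for the pool-consistency rule above -- command buffers
// must be freed from the pool that allocated them (names illustrative):
static void ExampleFreeFromOwningPool(VkDevice dev, VkCommandPool owning_pool, VkCommandBuffer cb) {
    // owning_pool must be the commandPool recorded in VkCommandBufferAllocateInfo.
    vkFreeCommandBuffers(dev, owning_pool, 1, &cb);
}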
TEST_F(VkLayerTest, InvalidDescriptorPoolConsistency) {
    VkResult err;

    TEST_DESCRIPTION("Allocate descriptor sets from one DS pool and attempt to delete them from another.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeDescriptorSets is attempting to free descriptorSet");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.flags = 0;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool bad_pool;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &bad_pool);
    ASSERT_VK_SUCCESS(err);

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    err = vkFreeDescriptorSets(m_device->device(), bad_pool, 1, &ds.set_);
    m_errorMonitor->VerifyFound();

    vkDestroyDescriptorPool(m_device->device(), bad_pool, NULL);
}

TEST_F(VkLayerTest, CreateUnknownObject) {
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageMemoryRequirements-image-parameter");

    TEST_DESCRIPTION("Pass an invalid image object handle into a Vulkan API call.");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Pass bogus handle into GetImageMemoryRequirements
    VkMemoryRequirements mem_reqs;
    uint64_t fakeImageHandle = 0xCADECADE;
    VkImage fauxImage = reinterpret_cast<VkImage &>(fakeImageHandle);

    vkGetImageMemoryRequirements(m_device->device(), fauxImage, &mem_reqs);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, UseObjectWithWrongDevice) {
    TEST_DESCRIPTION(
        "Try to destroy a render pass object using a device other than the one it was created on. This should generate a "
        "distinct error from the invalid handle error.");
    // Create first device and renderpass
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Create second device
    float priorities[] = {1.0f};
    VkDeviceQueueCreateInfo queue_info{};
    queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_info.pNext = NULL;
    queue_info.flags = 0;
    queue_info.queueFamilyIndex = 0;
    queue_info.queueCount = 1;
    queue_info.pQueuePriorities = &priorities[0];

    VkDeviceCreateInfo device_create_info = {};
    auto features = m_device->phy().features();
    device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_create_info.pNext = NULL;
    device_create_info.queueCreateInfoCount = 1;
    device_create_info.pQueueCreateInfos = &queue_info;
    device_create_info.enabledLayerCount = 0;
    device_create_info.ppEnabledLayerNames = NULL;
    device_create_info.pEnabledFeatures = &features;

    VkDevice second_device;
    ASSERT_VK_SUCCESS(vkCreateDevice(gpu(), &device_create_info, NULL, &second_device));

    // Try to destroy the renderpass from the first device using the second device
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-parent");
    vkDestroyRenderPass(second_device, m_renderPass, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroyDevice(second_device, NULL);
}

TEST_F(VkLayerTest, PipelineNotBound) {
    TEST_DESCRIPTION("Pass in an invalid pipeline object handle into a Vulkan API call.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipeline badPipeline = (VkPipeline)((size_t)0xbaadb1be);

    m_commandBuffer->begin();
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, badPipeline);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, BindImageInvalidMemoryType) {
    VkResult err;

    TEST_DESCRIPTION("Test validation check for an invalid memory type index during bind[Buffer|Image]Memory time");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Create an image, allocate memory, set a bad typeIndex and then try to
    // bind it
    VkImage image;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 0;
    mem_alloc.memoryTypeIndex = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
    mem_alloc.allocationSize = mem_reqs.size;

    // Introduce Failure, select invalid TypeIndex
    VkPhysicalDeviceMemoryProperties memory_info;

    vkGetPhysicalDeviceMemoryProperties(gpu(), &memory_info);
    unsigned int i;
    for (i = 0; i < memory_info.memoryTypeCount; i++) {
        if ((mem_reqs.memoryTypeBits & (1 << i)) == 0) {
            mem_alloc.memoryTypeIndex = i;
            break;
        }
    }
    if (i >= memory_info.memoryTypeCount) {
        printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix);
        vkDestroyImage(m_device->device(), image, NULL);
        return;
    }

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "for this object type are not compatible with the memory");

    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    err = vkBindImageMemory(m_device->device(), image, mem, 0);
    (void)err;

    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), image, NULL);
    vkFreeMemory(m_device->device(), mem, NULL);
}
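// Editorial sketch: the inverse of the invalid-index search above -- picking a memory type
// that IS allowed by VkMemoryRequirements::memoryTypeBits (illustrative helper; returns
// UINT32_MAX when no bit is set).
static uint32_t ExampleFirstCompatibleMemoryType(uint32_t memory_type_bits) {
    for (uint32_t i = 0; i < 32; ++i) {
        if (memory_type_bits & (1u << i)) return i;
    }
    return UINT32_MAX;  // no compatible type
}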
{}, buffer_mem_alloc = {}; image_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_mem_alloc.allocationSize = image_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_mem_alloc, 0); ASSERT_TRUE(pass); buffer_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_mem_alloc.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_mem_alloc, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem = VK_NULL_HANDLE, buffer_mem = VK_NULL_HANDLE; err = vkAllocateMemory(device(), &image_mem_alloc, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_mem_alloc, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-parameter"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-parameter"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), image, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } // Try to bind memory to an object that already has a memory binding { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(device(), image, image_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01044"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01029"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memoryOffset { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; // Leave some extra space for alignment wiggle room image_alloc_info.allocationSize = image_mem_reqs.size + image_mem_reqs.alignment; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size + buffer_mem_reqs.alignment; pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0); ASSERT_TRUE(pass); pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); VkDeviceMemory image_mem, buffer_mem; err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); // Test unaligned memory offset { if (image_mem_reqs.alignment > 1) { VkDeviceSize image_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01048"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } if (buffer_mem_reqs.alignment > 1) { VkDeviceSize buffer_offset = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01036"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } // Test memory offsets outside the memory allocation { VkDeviceSize image_offset = (image_alloc_info.allocationSize + image_mem_reqs.alignment) & ~(image_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01046"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); VkDeviceSize buffer_offset = (buffer_alloc_info.allocationSize + buffer_mem_reqs.alignment) & ~(buffer_mem_reqs.alignment - 1); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01031"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } // Test memory offsets within the memory allocation, but which leave too little memory for // the resource. 
{ VkDeviceSize image_offset = (image_mem_reqs.size - 1) & ~(image_mem_reqs.alignment - 1); if ((image_offset > 0) && (image_mem_reqs.size < (image_alloc_info.allocationSize - image_mem_reqs.alignment))) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049"); err = vkBindImageMemory(device(), image, image_mem, image_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } VkDeviceSize buffer_offset = (buffer_mem_reqs.size - 1) & ~(buffer_mem_reqs.alignment - 1); if (buffer_offset > 0) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037"); err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); } } vkFreeMemory(device(), image_mem, NULL); vkFreeMemory(device(), buffer_mem, NULL); vkDestroyImage(device(), image, NULL); vkDestroyBuffer(device(), buffer, NULL); } // Try to bind memory to an object with an invalid memory type { VkImage image = VK_NULL_HANDLE; err = vkCreateImage(device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkBuffer buffer = VK_NULL_HANDLE; err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {}; vkGetImageMemoryRequirements(device(), image, &image_mem_reqs); vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs); VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {}; image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; image_alloc_info.allocationSize = image_mem_reqs.size; buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; buffer_alloc_info.allocationSize = buffer_mem_reqs.size; // Create a mask of available memory types *not* supported by these resources, // and try to use one of them. VkPhysicalDeviceMemoryProperties memory_properties = {}; vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties); VkDeviceMemory image_mem, buffer_mem; uint32_t image_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~image_mem_reqs.memoryTypeBits; if (image_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(image_unsupported_mem_type_bits, &image_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01047"); err = vkBindImageMemory(device(), image, image_mem, 0); (void)err; // This may very well return an error. m_errorMonitor->VerifyFound(); vkFreeMemory(device(), image_mem, NULL); } uint32_t buffer_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~buffer_mem_reqs.memoryTypeBits; if (buffer_unsupported_mem_type_bits != 0) { pass = m_device->phy().set_memory_type(buffer_unsupported_mem_type_bits, &buffer_alloc_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01035"); err = vkBindBufferMemory(device(), buffer, buffer_mem, 0); (void)err; // This may very well return an error. 
            m_errorMonitor->VerifyFound();

            vkFreeMemory(device(), buffer_mem, NULL);
        }

        vkDestroyImage(device(), image, NULL);
        vkDestroyBuffer(device(), buffer, NULL);
    }

    // Try to bind memory to an image created with sparse memory flags. Sparse resources must be
    // bound with vkQueueBindSparse, so a plain vkBindImageMemory call is expected to be rejected.
    {
        VkImageCreateInfo sparse_image_create_info = image_create_info;
        sparse_image_create_info.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
        VkImageFormatProperties image_format_properties = {};
        err = vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), sparse_image_create_info.format,
                                                       sparse_image_create_info.imageType, sparse_image_create_info.tiling,
                                                       sparse_image_create_info.usage, sparse_image_create_info.flags,
                                                       &image_format_properties);
        if (!m_device->phy().features().sparseResidencyImage2D || err == VK_ERROR_FORMAT_NOT_SUPPORTED) {
            // most likely means sparse formats aren't supported here; skip this sub-test.
        } else {
            ASSERT_VK_SUCCESS(err);
            if (image_format_properties.maxExtent.width == 0) {
                printf("%s Sparse image format not supported; skipped.\n", kSkipPrefix);
                return;
            } else {
                VkImage sparse_image = VK_NULL_HANDLE;
                err = vkCreateImage(m_device->device(), &sparse_image_create_info, NULL, &sparse_image);
                ASSERT_VK_SUCCESS(err);
                VkMemoryRequirements sparse_mem_reqs = {};
                vkGetImageMemoryRequirements(m_device->device(), sparse_image, &sparse_mem_reqs);
                if (sparse_mem_reqs.memoryTypeBits != 0) {
                    VkMemoryAllocateInfo sparse_mem_alloc = {};
                    sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
                    sparse_mem_alloc.pNext = NULL;
                    sparse_mem_alloc.allocationSize = sparse_mem_reqs.size;
                    sparse_mem_alloc.memoryTypeIndex = 0;
                    pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0);
                    ASSERT_TRUE(pass);
                    VkDeviceMemory sparse_mem = VK_NULL_HANDLE;
                    err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem);
                    ASSERT_VK_SUCCESS(err);
                    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01045");
                    err = vkBindImageMemory(m_device->device(), sparse_image, sparse_mem, 0);
                    // This may very well return an error.
                    (void)err;
                    m_errorMonitor->VerifyFound();
                    vkFreeMemory(m_device->device(), sparse_mem, NULL);
                }
                vkDestroyImage(m_device->device(), sparse_image, NULL);
            }
        }
    }

    // Try to bind memory to a buffer created with sparse memory flags
    {
        VkBufferCreateInfo sparse_buffer_create_info = buffer_create_info;
        sparse_buffer_create_info.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
        if (!m_device->phy().features().sparseResidencyBuffer) {
            // most likely means sparse buffers aren't supported here; skip this sub-test.
        } else {
            VkBuffer sparse_buffer = VK_NULL_HANDLE;
            err = vkCreateBuffer(m_device->device(), &sparse_buffer_create_info, NULL, &sparse_buffer);
            ASSERT_VK_SUCCESS(err);
            VkMemoryRequirements sparse_mem_reqs = {};
            vkGetBufferMemoryRequirements(m_device->device(), sparse_buffer, &sparse_mem_reqs);
            if (sparse_mem_reqs.memoryTypeBits != 0) {
                VkMemoryAllocateInfo sparse_mem_alloc = {};
                sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
                sparse_mem_alloc.pNext = NULL;
                sparse_mem_alloc.allocationSize = sparse_mem_reqs.size;
                sparse_mem_alloc.memoryTypeIndex = 0;
                pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0);
                ASSERT_TRUE(pass);
                VkDeviceMemory sparse_mem = VK_NULL_HANDLE;
                err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem);
                ASSERT_VK_SUCCESS(err);
                m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01030");
                err = vkBindBufferMemory(m_device->device(), sparse_buffer, sparse_mem, 0);
                // This may very well return an error.
                (void)err;
                m_errorMonitor->VerifyFound();
                vkFreeMemory(m_device->device(), sparse_mem, NULL);
            }
            vkDestroyBuffer(m_device->device(), sparse_buffer, NULL);
        }
    }
}

TEST_F(VkLayerTest, BindMemoryToDestroyedObject) {
    VkResult err;
    bool pass;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Create an image object, allocate memory, destroy the object and then try to bind it
    VkImage image;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;

    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.allocationSize = 0;
    mem_alloc.memoryTypeIndex = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);

    mem_alloc.allocationSize = mem_reqs.size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);

    // Allocate memory
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    // Introduce validation failure, destroy Image object before binding
    vkDestroyImage(m_device->device(), image, NULL);

    // Now try to bind memory to this destroyed object
    err = vkBindImageMemory(m_device->device(), image, mem, 0);
    // This may very well return an error.
(void)err; m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); } TEST_F(VkLayerTest, ExceedMemoryAllocationCount) { VkResult err = VK_SUCCESS; const int max_mems = 32; VkDeviceMemory mems[max_mems + 1]; if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = (PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT"); if (!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) { printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix); return; } VkPhysicalDeviceProperties props; fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits); if (props.limits.maxMemoryAllocationCount > max_mems) { props.limits.maxMemoryAllocationCount = max_mems; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits); } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Number of currently valid memory objects is not less than the maximum allowed"); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = 4; int i; for (i = 0; i <= max_mems; i++) { err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mems[i]); if (err != VK_SUCCESS) { break; } } m_errorMonitor->VerifyFound(); for (int j = 0; j < i; j++) { vkFreeMemory(m_device->device(), mems[j], NULL); } } TEST_F(VkLayerTest, CreatePipelineBadVertexAttributeFormat) { TEST_DESCRIPTION("Test that pipeline validation catches invalid vertex attribute formats"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs; memset(&input_attribs, 0, sizeof(input_attribs)); // Pick a really bad format for this purpose and make sure it should fail input_attribs.format = VK_FORMAT_BC2_UNORM_BLOCK; VkFormatProperties format_props = m_device->format_properties(input_attribs.format); if ((format_props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0) { printf("%s Format unsuitable for test; skipped.\n", kSkipPrefix); return; } input_attribs.location = 0; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-format-00623"); VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attribs, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); 
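    // BC2 is a block-compressed format that is not expected to advertise
    // VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT (the test skips above if the implementation reports it),
    // so pipeline creation should have emitted VUID-VkVertexInputAttributeDescription-format-00623.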
m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageSampleCounts) { TEST_DESCRIPTION("Use bad sample counts in image transfer calls to trigger validation errors."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 256; image_create_info.extent.height = 256; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.flags = 0; VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {256, 256, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {128, 128, 1}; // Create two images, the source with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create two images, the dest with sampleCount = 4, and attempt to blit // between them { image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&image_create_info); src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); // TODO: These 2 VUs are redundant - expect one of them to go away m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; // Create src buffer and dst image with sampleCount = 4 and attempt to copy // buffer to image { VkBufferObj src_buffer; src_buffer.init_as_src(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&image_create_info); dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), src_buffer.handle(), dst_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } // Create dst buffer and src image with sampleCount = 4 and attempt to copy // image to buffer { VkBufferObj dst_buffer; dst_buffer.init_as_dst(*m_device, 128 * 128 * 4, reqs); image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; vk_testing::Image src_image; src_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_buffer.handle(), 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } } TEST_F(VkLayerTest, BlitImageFormatTypes) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_unsigned = VK_FORMAT_R8G8B8A8_UINT; VkFormat f_signed = VK_FORMAT_R8G8B8A8_SINT; VkFormat f_float = VK_FORMAT_R32_SFLOAT; VkFormat f_depth = VK_FORMAT_D32_SFLOAT_S8_UINT; VkFormat f_depth2 = VK_FORMAT_D32_SFLOAT; if (!ImageFormatIsSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL) || !ImageFormatIsSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL)) { printf("%s Requested formats not supported - BlitImageFormatTypes skipped.\n", kSkipPrefix); return; } // Note any missing feature bits bool usrc = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool udst = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool ssrc = !ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool sdst = 
!ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool fsrc = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); bool fdst = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d1dst = !ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT); bool d2src = !ImageFormatAndFeaturesSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT); VkImageObj unsigned_image(m_device); unsigned_image.Init(64, 64, 1, f_unsigned, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(unsigned_image.initialized()); unsigned_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj signed_image(m_device); signed_image.Init(64, 64, 1, f_signed, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(signed_image.initialized()); signed_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj float_image(m_device); float_image.Init(64, 64, 1, f_float, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(float_image.initialized()); float_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image(m_device); depth_image.Init(64, 64, 1, f_depth, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image.initialized()); depth_image.SetLayout(VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj depth_image2(m_device); depth_image2.Init(64, 64, 1, f_depth2, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(depth_image2.initialized()); depth_image2.SetLayout(VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {64, 64, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {32, 32, 1}; m_commandBuffer->begin(); // Unsigned int vs not an int m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed int vs not an int, m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), float_image.image(), float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Signed vs Unsigned int - generates both VUs m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), unsigned_image.image(), unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230"); if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), signed_image.image(), signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Depth vs any non-identical depth format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00231"); blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; if (d2src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218"); if (d1dst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), depth_image2.image(), depth_image2.Layout(), depth_image.image(), depth_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageFilters) { bool cubic_support = false; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, 
m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) { m_device_extension_names.push_back("VK_IMG_filter_cubic"); cubic_support = true; } ASSERT_NO_FATAL_FAILURE(InitState()); VkFormat fmt = VK_FORMAT_R8_UINT; if (!ImageFormatIsSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL)) { printf("%s No R8_UINT format support - BlitImageFilters skipped.\n", kSkipPrefix); return; } // Create 2D images VkImageObj src2D(m_device); VkImageObj dst2D(m_device); src2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); dst2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(src2D.initialized()); ASSERT_TRUE(dst2D.initialized()); src2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); dst2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); // Create 3D image VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = fmt; ci.extent = {64, 64, 4}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj src3D(m_device); src3D.init(&ci); ASSERT_TRUE(src3D.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {48, 48, 1}; blitRegion.dstOffsets[0] = {0, 0, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // UINT format should not support linear filtering, but check to be sure if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00235"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); } if (cubic_support && !ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG)) { // Invalid filter CUBIC_IMG m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00236"); vkCmdBlitImage(m_commandBuffer->handle(), src3D.image(), src3D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); // Invalid filter CUBIC_IMG + invalid 2D source image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00236"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00237"); vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion, VK_FILTER_CUBIC_IMG); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitImageLayout) { TEST_DESCRIPTION("Incorrect vkCmdBlitImage layouts"); 
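    // Three layout failure modes are exercised below: srcImageLayout/dstImageLayout values that
    // vkCmdBlitImage does not accept at all, images that are in the wrong layout when the command
    // buffer starts executing, and images moved into the wrong layout by a pipeline barrier in the
    // middle of the command buffer.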
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); VkResult err; VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Create images VkImageObj img_src_transfer(m_device); VkImageObj img_dst_transfer(m_device); VkImageObj img_general(m_device); VkImageObj img_color(m_device); img_src_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_dst_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_general.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); img_color.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(img_src_transfer.initialized()); ASSERT_TRUE(img_dst_transfer.initialized()); ASSERT_TRUE(img_general.initialized()); ASSERT_TRUE(img_color.initialized()); img_src_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); img_dst_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); img_general.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); img_color.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {48, 48, 1}; blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Illegal srcImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00222"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); // Illegal destImageLayout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00227"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Source image in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL when first use is VK_IMAGE_LAYOUT_GENERAL"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &blit_region, VK_FILTER_LINEAR); 
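    // This mismatch can only be checked against the image's actual layout at execution time, so
    // the command buffer is ended and submitted below before verifying the error.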
m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); m_commandBuffer->reset(0); m_commandBuffer->begin(); // Destination image in invalid layout at start of the CB m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL when first use is VK_IMAGE_LAYOUT_GENERAL"); vkCmdBlitImage(m_commandBuffer->handle(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Source image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = nullptr; img_barrier.srcAccessMask = 0; img_barrier.dstAccessMask = 0; img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.image = img_general.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00221"); vkCmdBlitImage(m_commandBuffer->handle(), img_general.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); // Destination image in invalid layout in the middle of CB m_commandBuffer->reset(0); m_commandBuffer->begin(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.image = img_dst_transfer.handle(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00226"); vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); } TEST_F(VkLayerTest, BlitImageOffsets) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM; if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s No blit feature bits - BlitImageOffsets 
skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = fmt; ci.extent = {64, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {64, 64, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 64}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); VkImageBlit blit_region = {}; blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.srcSubresource.baseArrayLayer = 0; blit_region.srcSubresource.layerCount = 1; blit_region.srcSubresource.mipLevel = 0; blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blit_region.dstSubresource.baseArrayLayer = 0; blit_region.dstSubresource.layerCount = 1; blit_region.dstSubresource.mipLevel = 0; m_commandBuffer->begin(); // 1D, with src/dest y offsets other than (0,1) blit_region.srcOffsets[0] = {0, 1, 0}; blit_region.srcOffsets[1] = {30, 1, 1}; blit_region.dstOffsets[0] = {32, 0, 0}; blit_region.dstOffsets[1] = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00245"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 1, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00250"); vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // 2D, with src/dest z offsets other than (0,1) blit_region.srcOffsets[0] = {0, 0, 1}; blit_region.srcOffsets[1] = {24, 31, 1}; blit_region.dstOffsets[0] = {32, 32, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00247"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[0] = {32, 32, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00252"); vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Source offsets exceeding source image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {65, 64, 1}; // src x blit_region.dstOffsets[0] = {0, 0, 0}; blit_region.dstOffsets[1] = {64, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // 
src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[1] = {64, 65, 1}; // src y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.srcOffsets[0] = {0, 0, 65}; // src z blit_region.srcOffsets[1] = {64, 64, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Dest offsets exceeding destination image dimensions blit_region.srcOffsets[0] = {0, 0, 0}; blit_region.srcOffsets[1] = {64, 64, 1}; blit_region.dstOffsets[0] = {96, 64, 32}; // dst x blit_region.dstOffsets[1] = {64, 0, 33}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 65, 32}; // dst y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blit_region.dstOffsets[0] = {0, 64, 65}; // dst z blit_region.dstOffsets[1] = {64, 0, 64}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00251"); // z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1, &blit_region, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MiscBlitImageTests) { ASSERT_NO_FATAL_FAILURE(Init()); VkFormat f_color = VK_FORMAT_R32_SFLOAT; // Need features ..BLIT_SRC_BIT & ..BLIT_DST_BIT if (!ImageFormatAndFeaturesSupported(gpu(), f_color, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - MiscBlitImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_color; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D color image VkImageObj color_img(m_device); color_img.init(&ci); ASSERT_TRUE(color_img.initialized()); // 2D multi-sample image ci.samples = VK_SAMPLE_COUNT_4_BIT; VkImageObj ms_img(m_device); ms_img.init(&ci); ASSERT_TRUE(ms_img.initialized()); // 3D color image ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {64, 64, 8}; VkImageObj color_3D_img(m_device); color_3D_img.init(&ci); ASSERT_TRUE(color_3D_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit with aspectMask errors blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00241"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00242"); vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src mip level blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01705"); // invalid srcSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // out-of-bounds srcOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // out-of-bounds srcOffset.y m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // out-of-bounds srcOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // region not contained within src image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst mip level blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.mipLevel = ci.mipLevels; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01706"); // invalid dstSubresource.mipLevel // Redundant unavoidable errors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // out-of-bounds dstOffset.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // out-of-bounds dstOffset.y 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00251"); // out-of-bounds dstOffset.z m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // region not contained within dst image vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid src array layer blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // invalid srcSubresource layer range vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit with invalid dst array layer blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.baseArrayLayer = ci.arrayLayers; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstSubresource-01708"); // invalid dstSubresource layer range // Redundant unavoidable errors vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.dstSubresource.baseArrayLayer = 0; // Blit multi-sample image // TODO: redundant VUs, one (1c8) or two (1d2 & 1d4) should be eliminated. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234"); vkCmdBlitImage(m_commandBuffer->handle(), ms_img.image(), ms_img.Layout(), ms_img.image(), ms_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); // Blit 3D with baseArrayLayer != 0 or layerCount != 1 blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcSubresource-01707"); // base+count > total layer count vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-layerCount-01700"); // layer count == 0 (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-layerCount-00239"); // src/dst layer count mismatch vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(), color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, BlitToDepthImageTests) { 
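    // vkCmdBlitImage requires VK_FORMAT_FEATURE_BLIT_DST_BIT on the destination image's format.
    // This test looks for a depth format that supports BLIT_SRC but not BLIT_DST and expects the
    // blit to be rejected with VUID-vkCmdBlitImage-dstImage-00223.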
ASSERT_NO_FATAL_FAILURE(Init()); // Need feature ..BLIT_SRC_BIT but not ..BLIT_DST_BIT // TODO: provide more choices here; supporting D32_SFLOAT as BLIT_DST isn't unheard of. VkFormat f_depth = VK_FORMAT_D32_SFLOAT; if (!ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT) || ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT)) { printf("%s Requested format features unavailable - BlitToDepthImageTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = f_depth; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // 2D depth image VkImageObj depth_img(m_device); depth_img.init(&ci); ASSERT_TRUE(depth_img.initialized()); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {0, 0, 0}; blitRegion.srcOffsets[1] = {16, 16, 1}; blitRegion.dstOffsets[0] = {32, 32, 0}; blitRegion.dstOffsets[1] = {64, 64, 1}; m_commandBuffer->begin(); // Blit depth image - has SRC_BIT but not DST_BIT blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223"); vkCmdBlitImage(m_commandBuffer->handle(), depth_img.image(), depth_img.Layout(), depth_img.image(), depth_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, MinImageTransferGranularity) { TEST_DESCRIPTION("Tests for validation of Queue Family property minImageTransferGranularity."); ASSERT_NO_FATAL_FAILURE(Init()); auto queue_family_properties = m_device->phy().queue_properties(); auto large_granularity_family = std::find_if(queue_family_properties.begin(), queue_family_properties.end(), [](VkQueueFamilyProperties family_properties) { VkExtent3D family_granularity = family_properties.minImageTransferGranularity; // We need a queue family that supports copy operations and has a large enough minImageTransferGranularity for the tests // below to make sense. 
return (family_properties.queueFlags & VK_QUEUE_TRANSFER_BIT || family_properties.queueFlags & VK_QUEUE_GRAPHICS_BIT || family_properties.queueFlags & VK_QUEUE_COMPUTE_BIT) && family_granularity.depth >= 4 && family_granularity.width >= 4 && family_granularity.height >= 4; }); if (large_granularity_family == queue_family_properties.end()) { printf("%s No queue family has a large enough granularity for this test to be meaningful, skipping test\n", kSkipPrefix); return; } const size_t queue_family_index = std::distance(queue_family_properties.begin(), large_granularity_family); VkExtent3D granularity = queue_family_properties[queue_family_index].minImageTransferGranularity; VkCommandPoolObj command_pool(m_device, queue_family_index, 0); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_3D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = granularity.width * 2; image_create_info.extent.height = granularity.height * 2; image_create_info.extent.depth = granularity.depth * 2; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; VkImageObj src_image_obj(m_device); src_image_obj.init(&image_create_info); ASSERT_TRUE(src_image_obj.initialized()); srcImage = src_image_obj.handle(); image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image_obj(m_device); dst_image_obj.init(&image_create_info); ASSERT_TRUE(dst_image_obj.initialized()); dstImage = dst_image_obj.handle(); VkCommandBufferObj command_buffer(m_device, &command_pool); ASSERT_TRUE(command_buffer.initialized()); command_buffer.begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = granularity.width; copyRegion.extent.height = granularity.height; copyRegion.extent.depth = granularity.depth; // Introduce failure by setting srcOffset to a bad granularity value copyRegion.srcOffset.y = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Introduce failure by setting extent to a granularity value that is bad // for both the source and destination image. 
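    // Note on the cases below: per the spec, copy offsets must be integer multiples of
    // minImageTransferGranularity, and extents must be multiples as well unless they reach
    // the edge of the image. With a granularity of at least 4 in each dimension (checked
    // above), an offset or extent of 3 against these double-granularity-sized images is
    // guaranteed to violate the rule.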
copyRegion.srcOffset.y = 0; copyRegion.extent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Now do some buffer/image copies VkBufferObj buffer; VkMemoryPropertyFlags reqs = 0; buffer.init_as_src_and_dst(*m_device, 8 * granularity.height * granularity.width * granularity.depth, reqs); VkBufferImageCopy region = {}; region.bufferOffset = 0; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageExtent.height = granularity.height; region.imageExtent.width = granularity.width; region.imageExtent.depth = granularity.depth; region.imageOffset.x = 0; region.imageOffset.y = 0; region.imageOffset.z = 0; // Introduce failure by setting imageExtent to a bad granularity value region.imageExtent.width = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity vkCmdCopyImageToBuffer(command_buffer.handle(), srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.width = granularity.width; // Introduce failure by setting imageOffset to a bad granularity value region.imageOffset.z = 3; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity vkCmdCopyBufferToImage(command_buffer.handle(), buffer.handle(), dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); command_buffer.end(); } TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) { TEST_DESCRIPTION( "Submit command buffer created using one queue family and attempt to submit them on a queue created in a different queue " "family."); ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vkCreateDevice // This test is meaningless unless we have multiple queue families auto queue_family_properties = m_device->phy().queue_properties(); std::vector<uint32_t> queue_families; for (uint32_t i = 0; i < queue_family_properties.size(); ++i) if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i); if (queue_families.size() < 2) { printf("%s Device only has one queue family; skipped.\n", kSkipPrefix); return; } const uint32_t queue_family = queue_families[0]; const uint32_t other_queue_family = queue_families[1]; VkQueue other_queue; vkGetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue); VkCommandPoolObj cmd_pool(m_device, queue_family); VkCommandBufferObj cmd_buff(m_device, &cmd_pool); cmd_buff.begin(); cmd_buff.end(); // Submit on the wrong queue VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buff.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00074"); vkQueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RenderPassAttachmentIndexOutOfRange) { 
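    // Every VkAttachmentReference::attachment must either be VK_ATTACHMENT_UNUSED or index
    // into VkRenderPassCreateInfo::pAttachments. Here attachmentCount is 0, so the reference
    // to attachment 0 is out of range and should trigger the 00834 VU at vkCreateRenderPass time.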
    ASSERT_NO_FATAL_FAILURE(Init());

    // There are no attachments, but refer to attachment 0.
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
    };

    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr};
    VkRenderPass rp;

    // "... must be less than the total number of attachments ..."
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo-attachment-00834");
    vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, RenderPassAttachmentUsedTwiceColor) {
    ASSERT_NO_FATAL_FAILURE(Init());
    TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments. This is not acceptable.");

    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference refs[] = {
        {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 2, refs, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
    VkRenderPass rp;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "subpass 0 already uses attachment 0 as a color attachment");
    vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, RenderPassAttachmentUsedTwiceMismatchingLayout) {
    ASSERT_NO_FATAL_FAILURE(Init());
    TEST_DESCRIPTION("Attachment is used simultaneously as color and input. The layouts differ, which is not acceptable.");

    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
    };
    VkAttachmentReference color_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkAttachmentReference input_ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input_ref, 1, &color_ref, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
    VkRenderPass rp;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
    vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkPositiveLayerTest, RenderPassAttachmentUsedTwiceOK) {
    ASSERT_NO_FATAL_FAILURE(Init());
    TEST_DESCRIPTION("Attachment is used simultaneously as color and input, with the same layout. This is OK.");

    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 1, &ref, nullptr, nullptr, 0, nullptr},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
    VkRenderPass rp;

    m_errorMonitor->ExpectSuccess();
    vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyNotFound();

    vkDestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, RenderPassAttachmentUsedTwicePreserveAndColor) {
    ASSERT_NO_FATAL_FAILURE(Init());
    TEST_DESCRIPTION("Attachment is used simultaneously as color and preserve. This is not acceptable.");

    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL};
    uint32_t preserve_attachment = 0;
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 1, &preserve_attachment},
    };
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
    VkRenderPass rp;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pPreserveAttachments-00854");
    vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, RenderPassPipelineSubpassMismatch) {
    TEST_DESCRIPTION("Use a pipeline for the wrong subpass in a render pass instance");
    ASSERT_NO_FATAL_FAILURE(Init());

    // A renderpass with two subpasses, both writing the same attachment.
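    // Background (our summary, not spec text): a graphics pipeline is compiled against a
    // specific subpass index of a compatible render pass, and binding or drawing with it
    // while a different subpass is active is invalid. That is what the two subtests below
    // provoke for a pipeline built against subpass 0.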
VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 1, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 2, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); char const *vsSource = "#version 450\n" "void main() { gl_Position = vec4(1); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); const VkPipelineLayoutObj pl(m_device); pipe.CreateVKPipeline(pl.handle(), rp); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; // subtest 1: bind in the wrong subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); // subtest 2: bind in correct subpass, then transition to next subpass vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1"); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), 
fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, RenderPassBarrierConflicts) { TEST_DESCRIPTION("Add a pipeline barrier within a subpass that has conflicting state"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkRenderPass rp_noselfdep; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); rpci.dependencyCount = 0; rpci.pDependencies = nullptr; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp_noselfdep); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-01172"); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp_noselfdep, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); rpbi.renderPass = rp; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; 
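    // The self-dependency cases below follow one pattern: inside a render pass,
    // vkCmdPipelineBarrier is only allowed if the current subpass declares a matching
    // self-dependency, and (roughly) the barrier's stage masks, access masks, and
    // dependencyFlags must be compatible with that dependency. Each case breaks exactly
    // one of these fields and expects the corresponding VU.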
img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; // Mis-match src stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Now mis-match dst stage mask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_HOST_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // Set srcQueueFamilyIndex to something other than IGNORED img_barrier.srcQueueFamilyIndex = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; // Mis-match mem barrier src access mask mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT; mem_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcAccessMask-01175"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); m_errorMonitor->VerifyFound(); // Mis-match mem barrier dst access mask. 
    // Also set srcAccessMask to 0 which should not cause an error
    mem_barrier.srcAccessMask = 0;
    mem_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstAccessMask-01176");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr,
                         0, nullptr);
    m_errorMonitor->VerifyFound();

    // Mis-match image barrier src access mask
    img_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcAccessMask-01175");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Mis-match image barrier dst access mask
    img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstAccessMask-01176");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Mis-match dependencyFlags
    img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dependencyFlags-01177");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0 /* wrong */, 0, nullptr, 0, nullptr, 1, &img_barrier);
    m_errorMonitor->VerifyFound();

    // Send non-zero bufferMemoryBarrierCount
    // Construct a valid BufferMemoryBarrier to avoid any parameter errors
    // First we need a valid buffer to reference
    VkBufferObj buffer;
    VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    buffer.init_as_src_and_dst(*m_device, 256, mem_reqs);
    VkBufferMemoryBarrier bmb = {};
    bmb.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    bmb.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    bmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    bmb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    bmb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    bmb.buffer = buffer.handle();
    bmb.offset = 0;
    bmb.size = VK_WHOLE_SIZE;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &bmb, 0,
                         nullptr);
    m_errorMonitor->VerifyFound();

    // Add image barrier w/ image handle that's not in framebuffer
    VkImageObj lone_image(m_device);
    lone_image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    img_barrier.image = lone_image.handle();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
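    // "VUID-vkCmdPipelineBarrier-image-01179": inside a render pass, any image named in an
    // image memory barrier must be one of the attachments of the active framebuffer;
    // lone_image is not, so the VerifyFound() below expects that error.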
m_errorMonitor->VerifyFound(); // Have image barrier with mis-matched layouts img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01181"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01180"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyRenderPass(m_device->device(), rp_noselfdep, nullptr); } TEST_F(VkLayerTest, InvalidSecondaryCommandBufferBarrier) { TEST_DESCRIPTION("Add an invalid image barrier in a secondary command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); // Second image that img_barrier will incorrectly use VkImageObj image2(m_device); image2.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, 
nullptr, rp, 0, VK_NULL_HANDLE, // Set to NULL FB handle intentionally to flesh out any errors VK_FALSE, 0, 0}; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, &cbii}; vkBeginCommandBuffer(secondary.handle(), &cbbi); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image2.handle(); // Image mis-matches with FB image img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); secondary.end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179"); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, ImageBarrierSubpassConflict) { TEST_DESCRIPTION("Check case where subpass index references different image from image barrier"); ASSERT_NO_FATAL_FAILURE(Init()); // Create RP/FB combo where subpass has incorrect index attachment, this is 2nd half of "VUID-vkCmdPipelineBarrier-image-01179" VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; // ref attachment points to wrong attachment index compared to img_barrier below VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attach, 1, subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkImageObj image2(m_device); image2.InitNoLayout(32, 32, 
1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView2 = image2.targetView(VK_FORMAT_R8G8B8A8_UNORM); // re-use imageView from start of test VkImageView iv_array[2] = {imageView, imageView2}; VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, iv_array, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); /* barrier references image from attachment index 0 */ img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, TemporaryExternalSemaphore) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external semaphore instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external semaphore device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = 
{VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above *temporarily* VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Wait on the imported semaphore twice in vkQueueSubmit, the second wait should be an error VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, }; 
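    // Why the second wait must fail (our summary of temporary-import semantics): a payload
    // imported with VK_SEMAPHORE_IMPORT_TEMPORARY_BIT is consumed by the first wait, which
    // restores the semaphore's prior state. Nothing ever signals import_semaphore again,
    // hence the "has no way to be signaled" message on the second wait in each batch.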
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled"); vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Wait on the imported semaphore twice in vkQueueBindSparse, the second wait should be an error VkBindSparseInfo bi[] = { {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, }; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled"); vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroySemaphore(m_device->device(), export_semaphore, nullptr); vkDestroySemaphore(m_device->device(), import_semaphore, nullptr); } TEST_F(VkLayerTest, TemporaryExternalFence) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external fence instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external fence device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external fence import and export capability VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type}; VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR"); vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp); if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) || !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; // Create a fence to export payload from VkFence export_fence; { VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type}; VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence); 
ASSERT_VK_SUCCESS(err); } // Create a fence to import payload into VkFence import_fence; { VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence); ASSERT_VK_SUCCESS(err); } #ifdef _WIN32 // Export fence payload to an opaque handle HANDLE handle = nullptr; { VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR"); err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, handle, nullptr}; auto vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR"); err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #else // Export fence payload to an opaque handle int fd = 0; { VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR"); err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd}; auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR"); err = vkImportFenceFdKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #endif // Undo the temporary import vkResetFences(m_device->device(), 1, &import_fence); // Signal the previously imported fence twice, the second signal should produce a validation error vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is already in use by another submission."); vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); m_errorMonitor->VerifyFound(); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), export_fence, nullptr); vkDestroyFence(m_device->device(), import_fence, nullptr); } TEST_F(VkPositiveLayerTest, SecondaryCommandBufferBarrier) { TEST_DESCRIPTION("Add a pipeline barrier in a secondary command buffer"); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with a single subpass that declared a self-dependency VkAttachmentDescription attach[] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, }; VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpasses[] = { {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr}, }; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, 
subpasses, 1, &dep}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, nullptr, rp, 0, VK_NULL_HANDLE, // Set to NULL FB handle intentionally to flesh out any errors VK_FALSE, 0, 0}; VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, &cbii}; vkBeginCommandBuffer(secondary.handle(), &cbbi); VkMemoryBarrier mem_barrier = {}; mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER; mem_barrier.pNext = NULL; mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; mem_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, RenderPassInvalidRenderArea) { TEST_DESCRIPTION("Generate INVALID_RENDER_AREA error by beginning renderpass with extent outside of framebuffer"); 
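    // The containment rule being exercised (a paraphrase, not spec text): for each dimension,
    // renderArea.offset + renderArea.extent must not exceed the framebuffer size, i.e.
    // offset.x + extent.width <= fb.width and offset.y + extent.height <= fb.height.
    // With a 256x256 render target, a 257x257 render area violates both.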
ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot execute a render pass with renderArea not within the bound of the framebuffer."); // Framebuffer for render target is 256x256, exceed that for INVALID_RENDER_AREA m_renderPassBeginInfo.renderArea.extent.width = 257; m_renderPassBeginInfo.renderArea.extent.height = 257; m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DisabledIndependentBlend) { TEST_DESCRIPTION( "Generate INDEPENDENT_BLEND by disabling independent blend and then specifying different blend states for two " "attachments"); VkPhysicalDeviceFeatures features = {}; features.independentBlend = VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&features)); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of pAttachments must be identical"); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkPipelineObj pipeline(m_device); // Create a renderPass with two color attachments VkAttachmentReference attachments[2] = {}; attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL; attachments[1].attachment = 1; attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = attachments; subpass.colorAttachmentCount = 2; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 2; VkAttachmentDescription attach_desc[2] = {}; attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL; attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM; attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT; attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass renderpass; vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); pipeline.AddShader(&vs); VkPipelineColorBlendAttachmentState att_state1 = {}, att_state2 = {}; att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state1.blendEnable = VK_TRUE; att_state2.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR; att_state2.blendEnable = VK_FALSE; pipeline.AddColorAttachment(0, att_state1); pipeline.AddColorAttachment(1, att_state2); pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), renderpass, NULL); } // Is the Pipeline compatible with the expectations of the Renderpass/subpasses? 
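// A minimal sketch of the rule the next test exercises
// ("VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753"): when rasterization is
// enabled and the subpass uses color attachments, pColorBlendState must be a valid pointer.
// The helper below is illustrative only; the name and its use are ours, not part of this suite.
static bool PipelineNeedsColorBlendState(const VkGraphicsPipelineCreateInfo &info, uint32_t subpass_color_attachment_count) {
    const bool rasterization_enabled =
        (info.pRasterizationState != nullptr) && (info.pRasterizationState->rasterizerDiscardEnable == VK_FALSE);
    // If this returns true while info.pColorBlendState is null, expect VU 00753.
    return rasterization_enabled && (subpass_color_attachment_count > 0);
}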
TEST_F(VkLayerTest, PipelineRenderpassCompatibility) {
    TEST_DESCRIPTION(
        "Create a graphics pipeline that is incompatible with the requirements of its contained Renderpass/subpasses.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetObj ds_obj(m_device);
    ds_obj.AppendDummy();
    ds_obj.CreateVKDescriptorSet(m_commandBuffer);

    VkShaderObj vs_obj(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);

    VkPipelineColorBlendAttachmentState att_state1 = {};
    att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state1.blendEnable = VK_TRUE;

    VkRenderpassObj rp_obj(m_device);

    {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753");
        VkPipelineObj pipeline(m_device);
        pipeline.AddShader(&vs_obj);
        pipeline.AddColorAttachment(0, att_state1);

        VkGraphicsPipelineCreateInfo info = {};
        pipeline.InitGraphicsPipelineCreateInfo(&info);
        info.pColorBlendState = nullptr;

        pipeline.CreateVKPipeline(ds_obj.GetPipelineLayout(), rp_obj.handle(), &info);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, CreateRenderPassAttachments) {
    TEST_DESCRIPTION(
        "Ensure that CreateRenderPass produces the expected validation errors when a subpass's attachments violate the valid usage "
        "conditions.");

    ASSERT_NO_FATAL_FAILURE(Init());

    std::vector<VkAttachmentDescription> attachments = {
        // input attachments
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
        // color attachments
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        // depth attachment
        {0, VK_FORMAT_D24_UNORM_S8_UINT, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL},
        // resolve attachment
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        // preserve attachments
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };

    std::vector<VkAttachmentReference> input = {
        {0, VK_IMAGE_LAYOUT_GENERAL},
    };
    std::vector<VkAttachmentReference> color = {
        {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {2, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference depth = {3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
    std::vector<VkAttachmentReference> resolve = {
        {4, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
        {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    std::vector<uint32_t> preserve = {5};

    VkSubpassDescription subpass = {0,
                                    VK_PIPELINE_BIND_POINT_GRAPHICS,
                                    (uint32_t)input.size(),
                                    input.data(),
                                    (uint32_t)color.size(),
                                    color.data(),
                                    resolve.data(),
                                    &depth,
                                    (uint32_t)preserve.size(),
                                    preserve.data()};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
                                   nullptr,
                                   0,
                                   (uint32_t)attachments.size(),
                                   attachments.data(),
                                   1,
                                   &subpass,
                                   0,
                                   nullptr};
    VkRenderPass rp;
    VkResult err;

    // Test too many color attachments
    {
        std::vector<VkAttachmentReference> too_many_colors(m_device->props.limits.maxColorAttachments + 1, color[0]);
        subpass.colorAttachmentCount = (uint32_t)too_many_colors.size();
        subpass.pColorAttachments = too_many_colors.data();
        subpass.pResolveAttachments = NULL;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-colorAttachmentCount-00845");
        err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
        m_errorMonitor->VerifyFound();
        if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
        subpass.colorAttachmentCount = (uint32_t)color.size();
        subpass.pColorAttachments = color.data();
        subpass.pResolveAttachments = resolve.data();
    }

    // Test sample count mismatch between color buffers
    attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_8_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-samples-parameter");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    attachments[subpass.pColorAttachments[1].attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;

    // Test sample count mismatch between color buffers and depth buffer
    attachments[subpass.pDepthStencilAttachment->attachment].samples = VK_SAMPLE_COUNT_8_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-samples-parameter");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    attachments[subpass.pDepthStencilAttachment->attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;

    // Test resolve attachment with UNUSED color attachment
    color[0].attachment = VK_ATTACHMENT_UNUSED;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00847");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    color[0].attachment = 1;

    // Test resolve from a single-sampled color attachment
    attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;
    attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_1_BIT;  // avoid mismatch (00337)
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00848");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
    attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_4_BIT;

    // Test resolve to a multi-sampled resolve attachment
    attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00849");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;

    // Test with color/resolve format mismatch
    attachments[subpass.pColorAttachments[0].attachment].format = VK_FORMAT_R8G8B8A8_SRGB;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00850");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    attachments[subpass.pColorAttachments[0].attachment].format = attachments[subpass.pResolveAttachments[0].attachment].format;

    // Test for UNUSED preserve attachments
    preserve[0] = VK_ATTACHMENT_UNUSED;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-attachment-00853");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    preserve[0] = 5;

    // Test for preserve attachments used elsewhere in the subpass
    color[0].attachment = preserve[0];
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pPreserveAttachments-00854");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    color[0].attachment = 1;

    // test for layout mismatch between input attachment and color attachment
    input[0].attachment = color[0].attachment;
    input[0].layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    input[0].attachment = 0;
    input[0].layout = VK_IMAGE_LAYOUT_GENERAL;

    // test for layout mismatch between input attachment and depth attachment
    input[0].attachment = depth.attachment;
    input[0].layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
    err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
    input[0].attachment = 0;
    input[0].layout = VK_IMAGE_LAYOUT_GENERAL;

    // Test for attachment used first as input with loadOp=CLEAR
    {
        std::vector<VkSubpassDescription> subpasses = {subpass, subpass, subpass};
        subpasses[0].inputAttachmentCount = 0;
        subpasses[1].inputAttachmentCount = 0;
        attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
        VkRenderPassCreateInfo rpci_multipass = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
                                                 nullptr,
                                                 0,
                                                 (uint32_t)attachments.size(),
                                                 attachments.data(),
                                                 (uint32_t)subpasses.size(),
                                                 subpasses.data(),
                                                 0,
                                                 nullptr};
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-loadOp-00846");
        err = vkCreateRenderPass(m_device->device(), &rpci_multipass, nullptr, &rp);
        m_errorMonitor->VerifyFound();
        if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
        attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    }
}

TEST_F(VkLayerTest, FramebufferCreateErrors) {
    TEST_DESCRIPTION(
        "Hit errors when attempting to create a framebuffer :\n"
        " 1. Mismatch between framebuffer & renderPass attachmentCount\n"
        " 2. Use a color image as depthStencil attachment\n"
        " 3. Mismatch framebuffer & renderPass attachment formats\n"
        " 4. Mismatch framebuffer & renderPass attachment #samples\n"
        " 5. Framebuffer attachment w/ non-1 mip-levels\n"
        " 6. Framebuffer attachment where dimensions don't match\n"
        " 7. Framebuffer attachment where dimensions don't match\n"
        " 8. Framebuffer attachment w/o identity swizzle\n"
        " 9. framebuffer dimensions exceed physical device limits\n");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-attachmentCount-00876");

    // Create a renderPass with a single color attachment
    VkAttachmentReference attach = {};
    attach.layout = VK_IMAGE_LAYOUT_GENERAL;
    VkSubpassDescription subpass = {};
    subpass.pColorAttachments = &attach;
    VkRenderPassCreateInfo rpci = {};
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    VkRenderPass rp;
    VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    VkImageView ivs[2];
    ivs[0] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM);
    ivs[1] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM);
    VkFramebufferCreateInfo fb_info = {};
    fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
    fb_info.pNext = NULL;
    fb_info.renderPass = rp;
    // Set mis-matching attachmentCount
    fb_info.attachmentCount = 2;
    fb_info.pAttachments = ivs;
    fb_info.width = 100;
    fb_info.height = 100;
    fb_info.layers = 1;

    VkFramebuffer fb;
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    vkDestroyRenderPass(m_device->device(), rp, NULL);

    // Create a renderPass with a depth-stencil attachment created with
    // IMAGE_USAGE_COLOR_ATTACHMENT
    // Add our color attachment to pDepthStencilAttachment
    subpass.pDepthStencilAttachment = &attach;
    subpass.pColorAttachments = NULL;
    VkRenderPass rp_ds;
    err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_ds);
    ASSERT_VK_SUCCESS(err);
    // Set correct attachment count, but attachment has COLOR usage bit set
    fb_info.attachmentCount = 1;
    fb_info.renderPass = rp_ds;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00878");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    vkDestroyRenderPass(m_device->device(), rp_ds, NULL);

    // Create new renderpass with alternate attachment format from fb
    attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM;
    subpass.pDepthStencilAttachment = NULL;
    subpass.pColorAttachments = &attach;
    err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    // Cause error due to mis-matched formats between rp & fb
    //  rp attachment 0 now has RGBA8 but corresponding fb attach is BGRA8
    fb_info.renderPass = rp;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00880");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    vkDestroyRenderPass(m_device->device(), rp, NULL);

    // Create new renderpass with alternate sample count from fb
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc.samples = VK_SAMPLE_COUNT_4_BIT;
    err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    // Cause error due to mis-matched sample count between rp & fb
    fb_info.renderPass = rp;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00881");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    vkDestroyRenderPass(m_device->device(), rp, NULL);

    {
        // Create an image with 2 mip levels.
        VkImageObj image(m_device);
        image.Init(128, 128, 2, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        ASSERT_TRUE(image.initialized());

        // Create an image view with two mip levels.
        VkImageView view;
        VkImageViewCreateInfo ivci = {};
        ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        ivci.image = image.handle();
        ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
        ivci.format = VK_FORMAT_B8G8R8A8_UNORM;
        ivci.subresourceRange.layerCount = 1;
        ivci.subresourceRange.baseMipLevel = 0;
        // Set level count to 2 (only 1 is allowed for FB attachment)
        ivci.subresourceRange.levelCount = 2;
        ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
        ASSERT_VK_SUCCESS(err);

        // Re-create renderpass to have matching sample count
        attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
        err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
        ASSERT_VK_SUCCESS(err);

        fb_info.renderPass = rp;
        fb_info.pAttachments = &view;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00883");
        err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

        m_errorMonitor->VerifyFound();
        if (err == VK_SUCCESS) {
            vkDestroyFramebuffer(m_device->device(), fb, NULL);
        }
        vkDestroyImageView(m_device->device(), view, NULL);
    }

    // Update view to original color buffer and grow FB dimensions too big
    fb_info.pAttachments = ivs;
    fb_info.height = 1024;
    fb_info.width = 1024;
    fb_info.layers = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }

    {
        // Create an image with one mip level.
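        // (This block exercises the identity-swizzle rule: a framebuffer attachment view must use
        // identity component swizzles, so the swapped components set below are expected to trigger
        // VUID-VkFramebufferCreateInfo-pAttachments-00884.)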
        VkImageObj image(m_device);
        image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        ASSERT_TRUE(image.initialized());

        // Create view attachment with non-identity swizzle
        VkImageView view;
        VkImageViewCreateInfo ivci = {};
        ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        ivci.image = image.handle();
        ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
        ivci.format = VK_FORMAT_B8G8R8A8_UNORM;
        ivci.subresourceRange.layerCount = 1;
        ivci.subresourceRange.baseMipLevel = 0;
        ivci.subresourceRange.levelCount = 1;
        ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        ivci.components.r = VK_COMPONENT_SWIZZLE_G;
        ivci.components.g = VK_COMPONENT_SWIZZLE_R;
        ivci.components.b = VK_COMPONENT_SWIZZLE_A;
        ivci.components.a = VK_COMPONENT_SWIZZLE_B;
        err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
        ASSERT_VK_SUCCESS(err);

        fb_info.pAttachments = &view;
        fb_info.height = 100;
        fb_info.width = 100;
        fb_info.layers = 1;
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00884");
        err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);

        m_errorMonitor->VerifyFound();
        if (err == VK_SUCCESS) {
            vkDestroyFramebuffer(m_device->device(), fb, NULL);
        }
        vkDestroyImageView(m_device->device(), view, NULL);
    }

    // reset attachment to color attachment
    fb_info.pAttachments = ivs;

    // Request fb that exceeds max width
    fb_info.width = m_device->props.limits.maxFramebufferWidth + 1;
    fb_info.height = 100;
    fb_info.layers = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00886");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    // and width=0
    fb_info.width = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00885");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }

    // Request fb that exceeds max height
    fb_info.width = 100;
    fb_info.height = m_device->props.limits.maxFramebufferHeight + 1;
    fb_info.layers = 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00888");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    // and height=0
    fb_info.height = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00887");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }

    // Request fb that exceeds max layers
    fb_info.width = 100;
    fb_info.height = 100;
    fb_info.layers = m_device->props.limits.maxFramebufferLayers + 1;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00890");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }
    // and layers=0
    fb_info.layers = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00889");
    err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
    m_errorMonitor->VerifyFound();
    if (err == VK_SUCCESS) {
        vkDestroyFramebuffer(m_device->device(), fb, NULL);
    }

    vkDestroyRenderPass(m_device->device(), rp, NULL);
}

TEST_F(VkLayerTest, DynamicDepthBiasNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Depth Bias dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic depth bias
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer");
    VKTriangleTest(BsoFailDepthBias);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicLineWidthNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Line Width dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic line width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer");
    VKTriangleTest(BsoFailLineWidth);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicViewportNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Viewport dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic viewport state
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic viewport(s) 0 are used by pipeline state object, but were not provided");
    VKTriangleTest(BsoFailViewport);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicScissorNotBound) {
    TEST_DESCRIPTION("Run simple draw calls to validate failure when Scissor dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic scissor state
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic scissor(s) 0 are used by pipeline state object, but were not provided");
    VKTriangleTest(BsoFailScissor);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicBlendConstantsNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Blend Constants dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic blend constant state
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic blend constants state not set for this command buffer");
    VKTriangleTest(BsoFailBlend);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicDepthBoundsNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Depth Bounds dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    if (!m_device->phy().features().depthBounds) {
        printf("%s Device does not support depthBounds test; skipped.\n", kSkipPrefix);
        return;
    }
    // Dynamic depth bounds
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic depth bounds state not set for this command buffer");
    VKTriangleTest(BsoFailDepthBounds);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicStencilReadNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Stencil Read dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic stencil read mask
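    // (As in the other Dynamic*NotBound cases in this group, VKTriangleTest(BsoFail*) is understood
    // to draw a triangle with a pipeline that declares the corresponding dynamic state but never
    // issues the matching vkCmdSet* call, which is what trips the message armed below.)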
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic stencil read mask state not set for this command buffer");
    VKTriangleTest(BsoFailStencilReadMask);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicStencilWriteNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Stencil Write dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic stencil write mask
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic stencil write mask state not set for this command buffer");
    VKTriangleTest(BsoFailStencilWriteMask);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, DynamicStencilRefNotBound) {
    TEST_DESCRIPTION(
        "Run simple draw calls to validate failure when Stencil Ref dynamic state is required but not correctly bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Dynamic stencil reference
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Dynamic stencil reference state not set for this command buffer");
    VKTriangleTest(BsoFailStencilReference);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, IndexBufferNotBound) {
    TEST_DESCRIPTION("Run an indexed draw call without an index buffer bound.");

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Index buffer object not bound to this command buffer when Indexed ");
    VKTriangleTest(BsoFailIndexBuffer);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, IndexBufferBadSize) {
    TEST_DESCRIPTION("Run indexed draw call with bad index buffer size.");

    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
    VKTriangleTest(BsoFailIndexBufferBadSize);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, IndexBufferBadOffset) {
    TEST_DESCRIPTION("Run indexed draw call with bad index buffer offset.");

    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
    VKTriangleTest(BsoFailIndexBufferBadOffset);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, IndexBufferBadBindSize) {
    TEST_DESCRIPTION("Run bind index buffer with a size greater than the index buffer.");

    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
    VKTriangleTest(BsoFailIndexBufferBadMapSize);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, IndexBufferBadBindOffset) {
    TEST_DESCRIPTION("Run bind index buffer with an offset greater than the size of the index buffer.");

    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
    VKTriangleTest(BsoFailIndexBufferBadMapOffset);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CommandBufferTwoSubmits) {
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // We luck out b/c by default the framework creates CB w/ the
    // VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set
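    // (Per the spec, ONE_TIME_SUBMIT promises each recording is submitted only once before the
    // command buffer is reset or re-recorded; the second vkQueueSubmit below breaks that promise.)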
    m_commandBuffer->begin();
    m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
    m_commandBuffer->end();

    // Bypass framework since it does the waits automatically
    VkResult err = VK_SUCCESS;
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    ASSERT_VK_SUCCESS(err);
    vkQueueWaitIdle(m_device->m_queue);

    // Cause validation error by re-submitting cmd buffer that should only be
    // submitted once
    err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    vkQueueWaitIdle(m_device->m_queue);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, AllocDescriptorFromEmptyPool) {
    TEST_DESCRIPTION("Attempt to allocate more sets and descriptors than descriptor pool has available.");
    VkResult err;

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // This test is valid for Vulkan 1.0 only -- skip if device has an API version greater than 1.0.
    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        printf("%s Device has apiVersion greater than 1.0 -- skipping Descriptor Set checks.\n", kSkipPrefix);
        return;
    }

    // Create Pool w/ Sampler descriptors only, but try to alloc a Uniform Buffer
    // descriptor from it
    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count.descriptorCount = 2;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.flags = 0;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding_samp = {};
    dsl_binding_samp.binding = 0;
    dsl_binding_samp.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dsl_binding_samp.descriptorCount = 1;
    dsl_binding_samp.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding_samp.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout_samp(m_device, {dsl_binding_samp});

    // Try to allocate 2 sets when pool only has 1 set
    VkDescriptorSet descriptor_sets[2];
    VkDescriptorSetLayout set_layouts[2] = {ds_layout_samp.handle(), ds_layout_samp.handle()};
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 2;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = set_layouts;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306");
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets);
    m_errorMonitor->VerifyFound();
    alloc_info.descriptorSetCount = 1;

    // Create layout w/ descriptor type not available in pool
    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding});

    VkDescriptorSet descriptor_set;
    alloc_info.descriptorSetCount = 1;
    alloc_info.pSetLayouts = &ds_layout_ub.handle();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307");
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);

    m_errorMonitor->VerifyFound();

    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, FreeDescriptorFromOneShotPool) {
    VkResult err;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-descriptorPool-00312");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.flags = 0;
    // Not specifying VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT means
    // app can only call vkResetDescriptorPool on this pool.
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});

    VkDescriptorSet descriptorSet;
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 1;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = &ds_layout.handle();
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
    ASSERT_VK_SUCCESS(err);

    err = vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet);
    m_errorMonitor->VerifyFound();

    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, InvalidDescriptorPool) {
    // Attempt to clear Descriptor Pool with bad object.
    // ObjectTracker should catch this.

    ASSERT_NO_FATAL_FAILURE(Init());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-parameter");
    uint64_t fake_pool_handle = 0xbaad6001;
    VkDescriptorPool bad_pool = reinterpret_cast<VkDescriptorPool &>(fake_pool_handle);
    vkResetDescriptorPool(device(), bad_pool, 0);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidDescriptorSet) {
    // Attempt to bind an invalid Descriptor Set to a valid Command Buffer
    // ObjectTracker should catch this.
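    // (0xbaad6001 below is just an arbitrary integer reinterpreted as a handle; because it was
    // never returned by the driver, the object tracker can flag it as an unknown object.)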
    // Create a valid cmd buffer
    // call vkCmdBindDescriptorSets w/ false Descriptor Set
    uint64_t fake_set_handle = 0xbaad6001;
    VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet &>(fake_set_handle);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetLayoutBinding layout_binding = {};
    layout_binding.binding = 0;
    layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    layout_binding.descriptorCount = 1;
    layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    layout_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding});

    const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout});

    m_commandBuffer->begin();
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &bad_set,
                            0, NULL);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, InvalidDescriptorSetLayout) {
    // Attempt to create a Pipeline Layout with an invalid Descriptor Set Layout.
    // ObjectTracker should catch this.
    uint64_t fake_layout_handle = 0xbaad6001;
    VkDescriptorSetLayout bad_layout = reinterpret_cast<VkDescriptorSetLayout &>(fake_layout_handle);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkPipelineLayout pipeline_layout;
    VkPipelineLayoutCreateInfo plci = {};
    plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    plci.pNext = NULL;
    plci.setLayoutCount = 1;
    plci.pSetLayouts = &bad_layout;
    vkCreatePipelineLayout(device(), &plci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, WriteDescriptorSetIntegrityCheck) {
    TEST_DESCRIPTION(
        "This test verifies some requirements of chapter 13.2.3 of the Vulkan Spec "
        "1) A uniform buffer update must have a valid buffer index. "
        "2) When using an array of descriptors in a single WriteDescriptor, the descriptor types and stageflags "
        "must all be the same. "
        "3) Immutable Sampler state must match across descriptors");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00324");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorPoolSize ds_type_count[4] = {};
    ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    ds_type_count[0].descriptorCount = 1;
    ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count[1].descriptorCount = 1;
    ds_type_count[2].type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count[2].descriptorCount = 1;
    ds_type_count[3].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    ds_type_count[3].descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = sizeof(ds_type_count) / sizeof(VkDescriptorPoolSize);
    ds_pool_ci.pPoolSizes = ds_type_count;

    VkDescriptorPool ds_pool;
    VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dslb1 = {};
    dslb1.binding = 0;
    dslb1.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dslb1.descriptorCount = 1;
    dslb1.stageFlags = VK_SHADER_STAGE_ALL;
    dslb1.pImmutableSamplers = NULL;

    VkDescriptorSetLayoutBinding dslb2 = {};
    dslb2.binding = 1;
    dslb2.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dslb2.descriptorCount = 1;
    dslb2.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb2.pImmutableSamplers = NULL;

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dslb3 = {};
    dslb3.binding = 2;
    dslb3.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dslb3.descriptorCount = 1;
    dslb3.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb3.pImmutableSamplers = static_cast<VkSampler *>(&sampler);

    const std::vector<VkDescriptorSetLayoutBinding> layout_bindings = {dslb1, dslb2, dslb3};
    const VkDescriptorSetLayoutObj ds_layout(m_device, layout_bindings);

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 1;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = &ds_layout.handle();
    VkDescriptorSet descriptorSet;
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
    ASSERT_VK_SUCCESS(err);

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = descriptorSet;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;

    // 1) The uniform buffer is intentionally invalid here
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    // Create a buffer to update the descriptor with
    uint32_t qfi = 0;
    VkBufferCreateInfo buffCI = {};
    buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffCI.size = 1024;
    buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffCI.queueFamilyIndexCount = 1;
    buffCI.pQueueFamilyIndices = &qfi;

    VkBuffer dyub;
    err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
    ASSERT_VK_SUCCESS(err);

    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs);

    VkMemoryAllocateInfo mem_alloc_info = {};
    mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc_info.allocationSize = mem_reqs.size;
    m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);

    err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    err = vkBindBufferMemory(m_device->device(), dyub, mem, 0);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorBufferInfo buffInfo[2] = {};
    buffInfo[0].buffer = dyub;
    buffInfo[0].offset = 0;
    buffInfo[0].range = 1024;
    buffInfo[1].buffer = dyub;
    buffInfo[1].offset = 0;
    buffInfo[1].range = 1024;

    descriptor_write.pBufferInfo = buffInfo;
    descriptor_write.descriptorCount = 2;

    // 2) The stageFlags don't match between the first and second descriptor
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    // 3) The second descriptor has a null pImmutableSamplers and
    // the third descriptor contains an immutable sampler
    descriptor_write.dstBinding = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;

    // Make pImageInfo non-null to avoid complaints of it missing
    VkDescriptorImageInfo imageInfo = {};
    imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    descriptor_write.pImageInfo = &imageInfo;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroyBuffer(m_device->device(), dyub, NULL);
    vkFreeMemory(m_device->device(), mem, NULL);
    vkDestroySampler(m_device->device(), sampler, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, WriteDescriptorSetConsecutiveUpdates) {
    TEST_DESCRIPTION(
        "Verifies that updates rolling over to next descriptor work correctly by destroying buffer from consecutive update known "
        "to be used in descriptor set and verifying that error is flagged.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, VK_SHADER_STAGE_ALL, nullptr},
                                         {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    uint32_t qfi = 0;
    VkBufferCreateInfo bci = {};
    bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bci.size = 2048;
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBufferObj buffer0;
    buffer0.init(*m_device, bci);

    VkPipelineObj pipe(m_device);
    {  // Scope 2nd buffer to cause early destruction
        VkBufferObj buffer1;
        bci.size = 1024;
        buffer1.init(*m_device, bci);

        VkDescriptorBufferInfo buffer_info[3] = {};
        buffer_info[0].buffer = buffer0.handle();
        buffer_info[0].offset = 0;
        buffer_info[0].range = 1024;
        buffer_info[1].buffer = buffer0.handle();
        buffer_info[1].offset = 1024;
        buffer_info[1].range = 1024;
        buffer_info[2].buffer = buffer1.handle();
        buffer_info[2].offset = 0;
        buffer_info[2].range = 1024;

        VkWriteDescriptorSet descriptor_write = {};
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = ds.set_;  // descriptor_set;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 3;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptor_write.pBufferInfo = buffer_info;

        // Update descriptor
        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

        // Create PSO that uses the uniform buffers
        char const *vsSource =
            "#version 450\n"
            "\n"
            "void main(){\n"
            "   gl_Position = vec4(1);\n"
            "}\n";
        char const *fsSource =
            "#version 450\n"
            "\n"
            "layout(location=0) out vec4 x;\n"
            "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
            "layout(set=0) layout(binding=1) uniform blah { int x; } duh;\n"
            "void main(){\n"
            "   x = vec4(duh.x, bar.y, bar.x, 1);\n"
            "}\n";

        VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
        VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        pipe.AddDefaultColorAttachment();

        VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
        ASSERT_VK_SUCCESS(err);

        m_commandBuffer->begin();
        m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

        vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
        vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                                &ds.set_, 0, nullptr);

        VkViewport viewport = {0, 0, 16, 16, 0, 1};
        vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
        VkRect2D scissor = {{0, 0}, {16, 16}};
        vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
        vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
        vkCmdEndRenderPass(m_commandBuffer->handle());
        m_commandBuffer->end();
    }
    // buffer1 just went out of scope and was destroyed along with its memory
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer ");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DeviceMemory ");

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineLayoutExceedsSetLimit) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout using more than the physical limit of SetLayouts.");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetLayoutBinding layout_binding = {};
    layout_binding.binding = 0;
    layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    layout_binding.descriptorCount = 1;
    layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    layout_binding.pImmutableSamplers = NULL;

    VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
    ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &layout_binding;

    VkDescriptorSetLayout ds_layout = {};
    VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    // Create an array of DSLs, one larger than the physical limit
    const auto excess_layouts = 1 + m_device->phy().properties().limits.maxBoundDescriptorSets;
    std::vector<VkDescriptorSetLayout> dsl_array(excess_layouts, ds_layout);

    VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipeline_layout_ci.pNext = NULL;
    pipeline_layout_ci.setLayoutCount = excess_layouts;
    pipeline_layout_ci.pSetLayouts = dsl_array.data();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286");
    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // Clean up
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

TEST_F(VkLayerTest, CreatePipelineLayoutExcessPerStageDescriptors) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed per-stage limits");

    ASSERT_NO_FATAL_FAILURE(Init());

    uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers;
    uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers;
    uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages;
    uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages;
    uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers;
    uint32_t max_combined = std::min(max_samplers, max_sampled_images);
    uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments;

    uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic;
    uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers;
    uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic;
    uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers;
    uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages;
    uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages;
    uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers;
    uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments;

    // Devices that report UINT32_MAX for any of these limits can't run this test
    if (UINT32_MAX == std::max({max_uniform_buffers, max_storage_buffers, max_sampled_images, max_storage_images, max_samplers})) {
        printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix);
        return;
    }

    VkDescriptorSetLayoutBinding dslb = {};
    std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {};

    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
    ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ds_layout_ci.pNext = NULL;

    VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipeline_layout_ci.pNext = NULL;
    pipeline_layout_ci.setLayoutCount = 1;
    pipeline_layout_ci.pSetLayouts = &ds_layout;
    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;

    // VU 0fe0023e - too many sampler type descriptors in fragment stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dslb.descriptorCount = max_samplers;
    dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb.descriptorCount = max_combined;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287");
    if ((max_samplers + max_combined) > sum_samplers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677");  // expect all-stages sum too
    }
    if (max_combined > sum_sampled_images) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00240 - too many uniform buffer type descriptors in vertex stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dslb.descriptorCount = max_uniform_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288");
    if (dslb.descriptorCount > sum_uniform_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678");  // expect all-stages sum too
    }
    if (dslb.descriptorCount > sum_dyn_uniform_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00242 - too many storage buffer type descriptors in compute stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    dslb.descriptorCount = max_storage_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_ALL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
    dslb_vec.push_back(dslb);
    dslb.binding = 2;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289");
    if (dslb.descriptorCount > sum_dyn_storage_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681");  // expect all-stages sum too
    }
    if (dslb_vec[0].descriptorCount + dslb_vec[2].descriptorCount > sum_storage_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00244 - too many sampled image type descriptors in multiple stages
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
    dslb.descriptorCount = max_sampled_images;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
    dslb_vec.push_back(dslb);
    dslb.binding = 2;
    dslb.descriptorCount = max_combined;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290");
    if (max_combined + 2 * max_sampled_images > sum_sampled_images) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682");  // expect all-stages sum too
    }
    if (max_combined > sum_samplers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00246 - too many storage image type descriptors in fragment stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
    dslb.descriptorCount = 1 + (max_storage_images / 2);
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291");
    if (2 * dslb.descriptorCount > sum_storage_images) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d18 - too many input attachments in fragment stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    dslb.descriptorCount = 1 + max_input_attachments;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676");
    if (dslb.descriptorCount > sum_input_attachments) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684");  // expect all-stages sum too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

TEST_F(VkLayerTest, CreatePipelineLayoutExcessDescriptorsOverall) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed limits");

    ASSERT_NO_FATAL_FAILURE(Init());

    uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers;
    uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers;
    uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages;
    uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages;
    uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers;
    uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments;

    uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic;
    uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers;
    uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic;
    uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers;
    uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages;
    uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages;
    uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers;
    uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments;

    // Devices that report UINT32_MAX for any of these limits can't run this test
    if (UINT32_MAX == std::max({sum_dyn_uniform_buffers, sum_uniform_buffers, sum_dyn_storage_buffers, sum_storage_buffers,
                                sum_sampled_images, sum_storage_images, sum_samplers, sum_input_attachments})) {
        printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix);
        return;
    }

    VkDescriptorSetLayoutBinding dslb = {};
    std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {};

    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
    ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ds_layout_ci.pNext = NULL;

    VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipeline_layout_ci.pNext = NULL;
    pipeline_layout_ci.setLayoutCount = 1;
    pipeline_layout_ci.pSetLayouts = &ds_layout;
    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;

    // VU 0fe00d1a - too many sampler type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dslb.descriptorCount = sum_samplers / 2;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb.descriptorCount = sum_samplers - dslb.descriptorCount + 1;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677");
    if (dslb.descriptorCount > max_samplers) {
        m_errorMonitor->SetDesiredFailureMsg(
            VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287");  // Expect max-per-stage samplers to exceed limits
    }
    if (dslb.descriptorCount > sum_sampled_images) {
        m_errorMonitor->SetDesiredFailureMsg(
            VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682");  // Expect max overall sampled image count to exceed limits
    }
    if (dslb.descriptorCount > max_sampled_images) {
        m_errorMonitor->SetDesiredFailureMsg(
            VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290");  // Expect max per-stage sampled image count to exceed limits
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d1c - too many uniform buffer type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dslb.descriptorCount = sum_uniform_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678");
    if (dslb.descriptorCount > max_uniform_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d1e - too many dynamic uniform buffer type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    dslb.descriptorCount = sum_dyn_uniform_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679");
    if (dslb.descriptorCount > max_uniform_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d20 - too many storage buffer type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    dslb.descriptorCount = sum_storage_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680");
    if (dslb.descriptorCount > max_storage_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d22 - too many dynamic storage buffer type descriptors overall
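    // Same pattern as the cases above: exceed maxDescriptorSetStorageBuffersDynamic by one,
    // and additionally expect the shared per-stage storage-buffer VU when that limit is hit.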
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
    dslb.descriptorCount = sum_dyn_storage_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681");
    if (dslb.descriptorCount > max_storage_buffers) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d24 - too many sampled image type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb.descriptorCount = max_samplers;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;  // revisit: the /2 split below is not robust to odd limits.
    uint32_t remaining = (max_samplers > sum_sampled_images ? 0 : (sum_sampled_images - max_samplers) / 2);
    dslb.descriptorCount = 1 + remaining;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 2;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682");
    if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > max_sampled_images) {
        m_errorMonitor->SetDesiredFailureMsg(
            VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290");  // Expect max-per-stage sampled images to exceed limits
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d26 - too many storage image type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
    dslb.descriptorCount = sum_storage_images / 2;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    dslb.descriptorCount = sum_storage_images - dslb.descriptorCount + 1;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683");
    if (dslb.descriptorCount > max_storage_images) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d28 - too many input attachment type descriptors overall
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    dslb.descriptorCount = sum_input_attachments + 1;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684");
    if (dslb.descriptorCount > max_input_attachments) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676");  // expect max-per-stage too
    }
    err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

TEST_F(VkLayerTest, InvalidCmdBufferBufferDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a buffer dependency being destroyed.");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkBuffer buffer;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;

    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    buf_info.size = 256;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = mem_reqs.size;
    bool pass = false;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        printf("%s Failed to set memory type.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    m_commandBuffer->begin();
    vkCmdFillBuffer(m_commandBuffer->handle(), buffer, 0, VK_WHOLE_SIZE, 0);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer ");
    // Destroy buffer dependency prior to submit to cause ERROR
    vkDestroyBuffer(m_device->device(), buffer, NULL);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
    vkFreeMemory(m_device->handle(), mem, NULL);
}

TEST_F(VkLayerTest, InvalidCmdBufferBufferViewDestroyed) {
    TEST_DESCRIPTION("Delete bufferView bound to cmd buffer, then attempt to submit cmd buffer.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorPoolSize ds_type_count;
    ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool;
    VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding layout_binding;
    layout_binding.binding = 0;
    layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    layout_binding.descriptorCount = 1;
    layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    layout_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout(m_device, {layout_binding});

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 1;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = &ds_layout.handle();
    VkDescriptorSet descriptor_set;
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});

    VkBuffer buffer;
    uint32_t queue_family_index = 0;
    VkBufferCreateInfo buffer_create_info = {};
    buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.size = 1024;
    buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
    buffer_create_info.queueFamilyIndexCount = 1;
    buffer_create_info.pQueueFamilyIndices = &queue_family_index;

    err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements memory_reqs;
    VkDeviceMemory buffer_memory;

    VkMemoryAllocateInfo memory_info = {};
    memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_info.allocationSize = 0;
    memory_info.memoryTypeIndex = 0;

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
    memory_info.allocationSize = memory_reqs.size;
    bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);

    err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
    ASSERT_VK_SUCCESS(err);
    err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
    ASSERT_VK_SUCCESS(err);

    VkBufferView view;
    VkBufferViewCreateInfo bvci = {};
    bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
    bvci.buffer = buffer;
    bvci.format = VK_FORMAT_R32_SFLOAT;
    bvci.range = VK_WHOLE_SIZE;

    err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view);
    ASSERT_VK_SUCCESS(err);

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = descriptor_set;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    descriptor_write.pTexelBufferView = &view;
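    // Texel-buffer descriptors are written through pTexelBufferView (not pBufferInfo), so the
    // VkBufferView itself becomes the command buffer dependency this test later destroys.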
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
        "layout(location=0) out vec4 x;\n"
        "void main(){\n"
        "   x = imageLoad(s, 0);\n"
        "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound BufferView ");

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
    // Bind pipeline to cmd buffer - This causes crash on Mali
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                            &descriptor_set, 0, nullptr);
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    // Delete BufferView in order to invalidate cmd buffer
    vkDestroyBufferView(m_device->device(), view, NULL);

    // Now attempt submit of cmd buffer
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();

    // Clean-up
    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkFreeMemory(m_device->device(), buffer_memory, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, InvalidCmdBufferImageDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an image dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImage image;
    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_create_info.flags = 0;
    VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    // Have to bind memory to image before recording cmd in cmd buffer using it
    VkMemoryRequirements mem_reqs;
    VkDeviceMemory image_mem;
    bool pass;
    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.memoryTypeIndex = 0;
    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
    mem_alloc.allocationSize = mem_reqs.size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), image, image_mem, 0);
    ASSERT_VK_SUCCESS(err);

    m_commandBuffer->begin();
    VkClearColorValue ccv;
    ccv.float32[0] = 1.0f;
    ccv.float32[1] = 1.0f;
    ccv.float32[2] = 1.0f;
    ccv.float32[3] = 1.0f;
    VkImageSubresourceRange isr = {};
    isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    isr.baseArrayLayer = 0;
    isr.baseMipLevel = 0;
    isr.layerCount = 1;
    isr.levelCount = 1;
    vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image ");
    // Destroy image dependency prior to submit to cause ERROR
    vkDestroyImage(m_device->device(), image, NULL);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->VerifyFound();
    vkFreeMemory(m_device->device(), image_mem, nullptr);
}

TEST_F(VkLayerTest, InvalidCmdBufferFramebufferImageDestroyed) {
    TEST_DESCRIPTION(
        "Attempt to draw with a command buffer that is invalid due to a framebuffer image dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkFormatProperties format_properties;
    VkResult err = VK_SUCCESS;
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
    if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
        printf("%s Image format doesn't support required features.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageCreateInfo image_ci = {};
    image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_ci.pNext = NULL;
    image_ci.imageType = VK_IMAGE_TYPE_2D;
    image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_ci.extent.width = 32;
    image_ci.extent.height = 32;
    image_ci.extent.depth = 1;
    image_ci.mipLevels = 1;
    image_ci.arrayLayers = 1;
    image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
    image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    image_ci.flags = 0;
    VkImage image;
    ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image));

    VkMemoryRequirements memory_reqs;
    VkDeviceMemory image_memory;
    bool pass;
    VkMemoryAllocateInfo memory_info = {};
    memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_info.pNext = NULL;
    memory_info.allocationSize = 0;
    memory_info.memoryTypeIndex = 0;
    vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
    memory_info.allocationSize = memory_reqs.size;
    pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
    ASSERT_VK_SUCCESS(err);

    VkImageViewCreateInfo ivci = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        nullptr,
        0,
        image,
        VK_IMAGE_VIEW_TYPE_2D,
        VK_FORMAT_B8G8R8A8_UNORM,
        {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A},
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    VkImageView view;
    err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
    ASSERT_VK_SUCCESS(err);
    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 32, 32, 1};
    VkFramebuffer fb;
    err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);

    // Just use default renderpass with our framebuffer
    m_renderPassBeginInfo.framebuffer = fb;
    m_renderPassBeginInfo.renderArea.extent.width = 32;
    m_renderPassBeginInfo.renderArea.extent.height = 32;

    // Create Null cmd buffer for submit
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    // Destroy image attached to framebuffer to invalidate cmd buffer
    vkDestroyImage(m_device->device(), image, NULL);
    // Now attempt to submit cmd buffer and verify error
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image ");
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();

    vkDestroyFramebuffer(m_device->device(), fb, nullptr);
    vkDestroyImageView(m_device->device(), view, nullptr);
    vkFreeMemory(m_device->device(), image_memory, nullptr);
}

TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use framebuffer.");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkFormatProperties format_properties;
    VkResult err = VK_SUCCESS;
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageObj image(m_device);
    image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);

    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
    VkFramebuffer fb;
    err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);

    // Just use default renderpass with our framebuffer
    m_renderPassBeginInfo.framebuffer = fb;

    // Create Null cmd buffer for submit
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    // Submit cmd buffer to put it in-flight
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    // Destroy framebuffer while in-flight
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyFramebuffer-framebuffer-00892");
    vkDestroyFramebuffer(m_device->device(), fb, NULL);
    m_errorMonitor->VerifyFound();

    // Wait for queue to complete so we can safely destroy everything
    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj");
    vkDestroyFramebuffer(m_device->device(), fb, nullptr);
}

TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use image that's child of framebuffer.");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkFormatProperties format_properties;
    VkResult err = VK_SUCCESS;
    vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageCreateInfo image_ci = {};
    image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_ci.pNext = NULL;
    image_ci.imageType = VK_IMAGE_TYPE_2D;
    image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_ci.extent.width = 256;
    image_ci.extent.height = 256;
    image_ci.extent.depth = 1;
    image_ci.mipLevels = 1;
    image_ci.arrayLayers = 1;
    image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
    image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    image_ci.flags = 0;
    VkImage image;
    ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image));

    VkMemoryRequirements memory_reqs;
    VkDeviceMemory image_memory;
    bool pass;
    VkMemoryAllocateInfo memory_info = {};
    memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_info.pNext = NULL;
    memory_info.allocationSize = 0;
    memory_info.memoryTypeIndex = 0;
    vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
    memory_info.allocationSize = memory_reqs.size;
    pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
    ASSERT_VK_SUCCESS(err);

    VkImageViewCreateInfo ivci = {
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        nullptr,
        0,
        image,
        VK_IMAGE_VIEW_TYPE_2D,
        VK_FORMAT_B8G8R8A8_UNORM,
        {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A},
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
    };
    VkImageView view;
    err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
    ASSERT_VK_SUCCESS(err);

    VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
    VkFramebuffer fb;
    err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
    ASSERT_VK_SUCCESS(err);

    // Just use default renderpass with our framebuffer
    m_renderPassBeginInfo.framebuffer = fb;

    // Create Null cmd buffer for submit
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    // Submit cmd buffer to put it (and attached imageView) in-flight
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    // Submit cmd buffer to put framebuffer and children in-flight
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    // Destroy image attached to framebuffer while in-flight
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImage-image-01000");
    vkDestroyImage(m_device->device(), image, NULL);
    m_errorMonitor->VerifyFound();

    // Wait for queue to complete so we can safely destroy image and other objects
    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Image obj");
    vkDestroyImage(m_device->device(), image, NULL);
    vkDestroyFramebuffer(m_device->device(), fb, nullptr);
    vkDestroyImageView(m_device->device(), view, nullptr);
    vkFreeMemory(m_device->device(), image_memory, nullptr);
}

TEST_F(VkLayerTest, RenderPassInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use renderPass.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Create simple renderpass
    VkAttachmentReference attach = {};
    attach.layout = VK_IMAGE_LAYOUT_GENERAL;
    VkSubpassDescription subpass = {};
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &attach;
    VkRenderPassCreateInfo rpci = {};
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;
    VkAttachmentDescription attach_desc = {};
    attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    rpci.pAttachments = &attach_desc;
    rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    VkRenderPass rp;
    VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    m_errorMonitor->ExpectSuccess();

    m_commandBuffer->begin();
    VkRenderPassBeginInfo rpbi = {};
    rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
    rpbi.framebuffer = m_framebuffer;
    rpbi.renderPass = rp;
    m_commandBuffer->BeginRenderPass(rpbi);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyNotFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-00873");
    vkDestroyRenderPass(m_device->device(), rp, nullptr);
    m_errorMonitor->VerifyFound();

    // Wait for queue to complete so we can safely destroy rp
    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->SetUnexpectedError("If renderPass is not VK_NULL_HANDLE, renderPass must be a valid VkRenderPass handle");
    m_errorMonitor->SetUnexpectedError("Was it created? Has it already been destroyed?");
    vkDestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, ImageMemoryNotBound) {
    TEST_DESCRIPTION("Attempt to draw with an image which has not had memory bound to it.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImage image;
    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_create_info.flags = 0;
    VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    // Allocate memory for the image, but deliberately leave it unbound
    VkMemoryRequirements mem_reqs;
    VkDeviceMemory image_mem;
    bool pass;
    VkMemoryAllocateInfo mem_alloc = {};
    mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc.pNext = NULL;
    mem_alloc.memoryTypeIndex = 0;
    vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
    mem_alloc.allocationSize = mem_reqs.size;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
    ASSERT_TRUE(pass);
    err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem);
    ASSERT_VK_SUCCESS(err);

    // Introduce error, do not call vkBindImageMemory(m_device->device(), image, image_mem, 0);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " used with no memory bound. Memory should be bound by calling vkBindImageMemory().");

    m_commandBuffer->begin();
    VkClearColorValue ccv;
    ccv.float32[0] = 1.0f;
    ccv.float32[1] = 1.0f;
    ccv.float32[2] = 1.0f;
    ccv.float32[3] = 1.0f;
    VkImageSubresourceRange isr = {};
    isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    isr.baseArrayLayer = 0;
    isr.baseMipLevel = 0;
    isr.layerCount = 1;
    isr.levelCount = 1;
    vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr);
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();
    vkDestroyImage(m_device->device(), image, NULL);
    vkFreeMemory(m_device->device(), image_mem, nullptr);
}

TEST_F(VkLayerTest, BufferMemoryNotBound) {
    TEST_DESCRIPTION("Attempt to copy from a buffer which has not had memory bound to it.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
               VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkBuffer buffer;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;

    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
    buf_info.size = 1024;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = 1024;
    bool pass = false;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        printf("%s Failed to set memory type.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    // Introduce failure by not calling vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " used with no memory bound. Memory should be bound by calling vkBindBufferMemory().");

    VkBufferImageCopy region = {};
    region.bufferRowLength = 16;
    region.bufferImageHeight = 16;
    region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    region.imageSubresource.layerCount = 1;
    region.imageExtent.height = 4;
    region.imageExtent.width = 4;
    region.imageExtent.depth = 1;
    m_commandBuffer->begin();
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();
    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkFreeMemory(m_device->handle(), mem, NULL);
}

TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkEvent event;
    VkEventCreateInfo evci = {};
    evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    VkResult result = vkCreateEvent(m_device->device(), &evci, NULL, &event);
    ASSERT_VK_SUCCESS(result);

    m_commandBuffer->begin();
    vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Event ");
    // Destroy event dependency prior to submit to cause ERROR
    vkDestroyEvent(m_device->device(), event, NULL);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkQueryPool query_pool;
    VkQueryPoolCreateInfo qpci{};
    qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    qpci.queryType = VK_QUERY_TYPE_TIMESTAMP;
    qpci.queryCount = 1;
    VkResult result = vkCreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool);
    ASSERT_VK_SUCCESS(result);

    m_commandBuffer->begin();
    vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound QueryPool ");
    // Destroy query pool dependency prior to submit to cause ERROR
    vkDestroyQueryPool(m_device->device(), query_pool, NULL);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCmdBufferPipelineDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a pipeline dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    {
        // Use helper to create graphics pipeline
        CreatePipelineHelper helper(*this);
        helper.InitInfo();
        helper.InitState();
        helper.CreateGraphicsPipeline();

        // Bind helper pipeline to command buffer
        m_commandBuffer->begin();
        vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
        m_commandBuffer->end();

        // pipeline will be destroyed when helper goes out of scope
    }

    // Cause error by submitting command buffer that references destroyed pipeline
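    // The layers mark the command buffer invalid when a recorded-against object is destroyed,
    // but the error itself is reported at submit time, which is why the failure message is
    // armed just before the submit below.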
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Pipeline ");
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkPositiveLayerTest, DestroyPipelineRenderPass) {
    TEST_DESCRIPTION("Draw using a pipeline whose create renderPass has been destroyed.");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkResult err;

    // Create a renderPass that's compatible with Draw-time renderPass
    VkAttachmentDescription att = {};
    att.format = m_render_target_fmt;
    att.samples = VK_SAMPLE_COUNT_1_BIT;
    att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

    VkAttachmentReference ref = {};
    ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    ref.attachment = 0;

    m_renderPassClearValues.clear();
    VkClearValue clear = {};
    clear.color = m_clear_color;

    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.flags = 0;
    subpass.inputAttachmentCount = 0;
    subpass.pInputAttachments = NULL;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &ref;
    subpass.pResolveAttachments = NULL;
    subpass.pDepthStencilAttachment = NULL;
    subpass.preserveAttachmentCount = 0;
    subpass.pPreserveAttachments = NULL;

    VkRenderPassCreateInfo rp_info = {};
    rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
    rp_info.attachmentCount = 1;
    rp_info.pAttachments = &att;
    rp_info.subpassCount = 1;
    rp_info.pSubpasses = &subpass;

    VkRenderPass rp;
    err = vkCreateRenderPass(device(), &rp_info, NULL, &rp);
    ASSERT_VK_SUCCESS(err);

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
    m_viewports.push_back(viewport);
    pipe.SetViewport(m_viewports);
    VkRect2D rect = {{0, 0}, {64, 64}};
    m_scissors.push_back(rect);
    pipe.SetScissor(m_scissors);

    const VkPipelineLayoutObj pl(m_device);
    pipe.CreateVKPipeline(pl.handle(), rp);

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());

    // Destroy renderPass before pipeline is used in Draw
    // We delay until after CmdBindPipeline to verify that invalid binding isn't
    // created between CB & renderPass, which we used to do.
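    // Destroying rp here should be legal: it was only used to create the pipeline, and at
    // draw time the spec requires render pass compatibility with the active render pass
    // instance, not that the creation render pass still exist.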
vkDestroyRenderPass(m_device->device(), rp, nullptr); vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetBufferDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor set with a buffer dependency being " "destroyed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Allocate memory and bind to buffer so we can make it to the appropriate // error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to set memory type.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = buffer; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = 
VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &m_viewports[0]); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &m_scissors[0]); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer "); // Destroy buffer should invalidate the cmd buffer, causing error on submit vkDestroyBuffer(m_device->device(), buffer, NULL); // Attempt to submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetImageSamplerDestroyed) { TEST_DESCRIPTION( "Attempt to draw with a command buffer that is invalid due to a bound descriptor sets with a combined image sampler having " "their image, sampler, and descriptor set each respectively destroyed and then attempting to submit associated cmd " "buffers. 
Attempt to destroy a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with VkImage image; VkImage image2; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for both images memory_info.allocationSize = memory_reqs.size * 2; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); // Bind second image to memory right after first image err = vkBindImageMemory(m_device->device(), image2, image_memory, memory_reqs.size); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = 
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView tmp_view; // First test deletes this view VkImageView view; VkImageView view2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &tmp_view); ASSERT_VK_SUCCESS(err); err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); image_view_create_info.image = image2; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view2); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; VkSampler sampler2; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler2); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = tmp_view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // First error case is destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // This first submit should be successful vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Now destroy imageview and 
reset cmdBuffer vkDestroyImageView(m_device->device(), tmp_view, NULL); m_commandBuffer->reset(0); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that has been destroyed."); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Re-update descriptor with new view img_info.imageView = view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now test destroying sampler prior to cmd buffer submission m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy sampler invalidates the cmd buffer, causing error on submit vkDestroySampler(m_device->device(), sampler, NULL); // Attempt to submit cmd buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound Sampler"); submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now re-update descriptor with valid sampler and delete image img_info.sampler = sampler2; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); VkCommandBufferBeginInfo info = {}; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image "); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Destroy image invalidates the cmd buffer, causing error on submit vkDestroyImage(m_device->device(), image, NULL); // Attempt to submit cmd buffer submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Now update descriptor to be valid, but then free descriptor img_info.imageView = view2; vkUpdateDescriptorSets(m_device->device(), 1, 
&descriptor_write, 0, NULL); m_commandBuffer->begin(&info); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Immediately try to free the descriptor set while the command buffer that binds it is still in flight - failure expected m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call vkFreeDescriptorSets() on descriptor set 0x"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); m_errorMonitor->VerifyFound(); // Try again once the queue is idle - should succeed w/o error // TODO - though the particular error above doesn't re-occur, there are other 'unexpecteds' still to clean up vkQueueWaitIdle(m_device->m_queue); m_errorMonitor->SetUnexpectedError( "pDescriptorSets must be a valid pointer to an array of descriptorSetCount VkDescriptorSet handles, each element of which " "must either be a valid handle or VK_NULL_HANDLE"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorSet obj"); vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet); // Attempt to submit cmd buffer containing the freed descriptor set submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DescriptorSet "); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyFound(); // Cleanup vkFreeMemory(m_device->device(), image_memory, NULL); vkDestroySampler(m_device->device(), sampler2, NULL); vkDestroyImage(m_device->device(), image2, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyImageView(m_device->device(), view2, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidDescriptorSetSamplerDestroyed) { TEST_DESCRIPTION("Attempt to draw with a bound descriptor set with a combined image sampler where the sampler has been deleted."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); // Create images to update the descriptor with VkImageObj image(m_device); const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; VkResult err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Destroy the sampler before it's bound to the cmd buffer vkDestroySampler(m_device->device(), sampler, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); // The sampler was destroyed above, so recording a draw that uses the descriptor set should fail at draw time m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " Descriptor in binding #0 at global descriptor index 0 is using sampler "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, ImageDescriptorLayoutMismatch) { TEST_DESCRIPTION("Update an image sampler with a layout that doesn't match the actual image layout at the time the image is used."); ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount =
1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create images to update the descriptor with const VkFormat format = VK_FORMAT_B8G8R8A8_UNORM; VkImageObj image(m_device); image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image.handle(); image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; // This should cause a mis-match.
The descriptor is updated with SHADER_READ_ONLY_OPTIMAL, but the image will actually be in TRANSFER_SRC_OPTIMAL when it is used. img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // Transition the image to TRANSFER_SRC_OPTIMAL, a layout different from the SHADER_RO layout recorded in the descriptor image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL); cmd_buf.BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); vkCmdSetScissor(cmd_buf.handle(), 0, 1, &scissor); // At draw time the update layout will mis-match the actual layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageLayout-00344"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " Image layout specified at vkUpdateDescriptorSets() time doesn't match actual image layout at time descriptor is used."); cmd_buf.Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); cmd_buf.EndRenderPass(); cmd_buf.end(); // Submit cmd buffer VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buf.handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) { TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(),
&ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, NULL); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Destroy pool while in-flight, causing error 
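// (A descriptor pool is considered in use while any submitted command buffer that binds a descriptor set allocated from it has not completed execution; that is the condition VUID 00303 below checks.)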
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, NULL); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); // TODO : It seems Validation layers think ds_pool was already destroyed, even though it wasn't? } TEST_F(VkLayerTest, DescriptorPoolInUseResetSignaled) { TEST_DESCRIPTION("Reset a DescriptorPool with a DescriptorSet that is in use."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = nullptr; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, nullptr, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = nullptr; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_set; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create image to update the descriptor with VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM); // Create Sampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptor_set; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, nullptr); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" 
"\n" "layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_set, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer to put pool in-flight VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); // Reset pool while in-flight, causing error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-00313"); vkResetDescriptorPool(m_device->device(), ds_pool, 0); m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Cleanup vkDestroySampler(m_device->device(), sampler, nullptr); m_errorMonitor->SetUnexpectedError( "If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle"); m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj"); vkDestroyDescriptorPool(m_device->device(), ds_pool, nullptr); } TEST_F(VkLayerTest, DescriptorImageUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt an image descriptor set update where image's bound memory has been freed."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create 
images to update the descriptor with VkImage image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); // Initially bind memory to avoid error at bind view time. We'll break binding before update. VkMemoryRequirements memory_reqs; VkDeviceMemory image_memory; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Allocate enough memory for image memory_info.allocationSize = memory_reqs.size; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), image, image_memory, 0); ASSERT_VK_SUCCESS(err); VkImageViewCreateInfo image_view_create_info = {}; image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_create_info.image = image; image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_create_info.format = tex_format; image_view_create_info.subresourceRange.layerCount = 1; image_view_create_info.subresourceRange.baseMipLevel = 0; image_view_create_info.subresourceRange.levelCount = 1; image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; VkImageView view; err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view); ASSERT_VK_SUCCESS(err); // Create Samplers VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); // Update descriptor with image and sampler VkDescriptorImageInfo img_info = {}; img_info.sampler = sampler; img_info.imageView = view; img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &img_info; // Break memory binding and attempt update vkFreeMemory(m_device->device(), image_memory, nullptr); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " previously bound memory was freed. 
Memory must not be freed prior to this operation."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Cleanup vkDestroyImage(m_device->device(), image, NULL); vkDestroySampler(m_device->device(), sampler, NULL); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPipeline) { // Attempt to bind an invalid Pipeline to a valid Command Buffer // ObjectTracker should catch this. // Create a valid cmd buffer // call vkCmdBindPipeline w/ false Pipeline uint64_t fake_pipeline_handle = 0xbaad6001; VkPipeline bad_pipeline = reinterpret_cast<VkPipeline &>(fake_pipeline_handle); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, bad_pipeline); m_errorMonitor->VerifyFound(); // Now issue a draw call with no pipeline bound m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "At Draw/Dispatch time no valid VkPipeline is bound!"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Finally same check once more but with Dispatch/Compute m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "At Draw/Dispatch time no valid VkPipeline is bound!"); vkCmdEndRenderPass(m_commandBuffer->handle()); // must be outside renderpass vkCmdDispatch(m_commandBuffer->handle(), 0, 0, 0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CmdDispatchExceedLimits) { TEST_DESCRIPTION("Compute dispatch with dimensions that exceed device limits"); // Enable KHX device group extensions, if available if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool khx_dg_ext_available = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME); khx_dg_ext_available = true; } ASSERT_NO_FATAL_FAILURE(InitState()); uint32_t x_limit = m_device->props.limits.maxComputeWorkGroupCount[0]; uint32_t y_limit = m_device->props.limits.maxComputeWorkGroupCount[1]; uint32_t z_limit = m_device->props.limits.maxComputeWorkGroupCount[2]; if (std::max({x_limit, y_limit, z_limit}) == UINT32_MAX) { printf("%s device maxComputeWorkGroupCount limit reports UINT32_MAX, test not possible, skipping.\n", kSkipPrefix); return; } // Create a minimal compute pipeline std::string cs_text = "#version 450\nvoid main() {}\n"; // minimal no-op shader VkShaderObj cs_obj(m_device, cs_text.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this); VkPipelineLayoutCreateInfo info = {}; info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; info.pNext = nullptr; VkPipelineLayout pipe_layout; vkCreatePipelineLayout(device(), &info, nullptr, &pipe_layout); VkComputePipelineCreateInfo pipeline_info = {}; pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO; pipeline_info.pNext = nullptr; pipeline_info.flags = khx_dg_ext_available ? 
VK_PIPELINE_CREATE_DISPATCH_BASE_KHR : 0; pipeline_info.layout = pipe_layout; pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; pipeline_info.stage.pNext = nullptr; pipeline_info.stage.flags = 0; pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; pipeline_info.stage.module = cs_obj.handle(); pipeline_info.stage.pName = "main"; pipeline_info.stage.pSpecializationInfo = nullptr; VkPipeline cs_pipeline; vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline); // Bind pipeline to command buffer m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline); // Dispatch counts that exceed device limits m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountX-00386"); vkCmdDispatch(m_commandBuffer->handle(), x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountY-00387"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountZ-00388"); vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); if (khx_dg_ext_available) { PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)vkGetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"); // Base equals or exceeds limit m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00421"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit, y_limit - 1, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00422"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit, z_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupZ-00423"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit - 1, z_limit, 0, 0, 0); m_errorMonitor->VerifyFound(); // (Base + count) exceeds limit uint32_t x_base = x_limit / 2; uint32_t y_base = y_limit / 2; uint32_t z_base = z_limit / 2; x_limit -= x_base; y_limit -= y_base; z_limit -= z_base; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountX-00424"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit + 1, y_limit, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountY-00425"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit + 1, z_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountZ-00426"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit, z_limit + 1); m_errorMonitor->VerifyFound(); } else { printf("%s KHX_DEVICE_GROUP_* extensions not supported, skipping CmdDispatchBaseKHR() tests.\n", kSkipPrefix); } // Clean up vkDestroyPipeline(device(), cs_pipeline, nullptr); vkDestroyPipelineLayout(device(), pipe_layout, nullptr); } 
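// The test above exercises the maxComputeWorkGroupCount limits directly. As an
// illustrative sketch only (not used by any test in this file; the helper name
// is hypothetical), an application could clamp its dispatch dimensions against
// those limits before recording the dispatch:
static inline void ClampDispatchGroupCounts(const VkPhysicalDeviceLimits &limits, uint32_t group_counts[3]) {
    for (uint32_t i = 0; i < 3; ++i) {
        // Each groupCount dimension passed to vkCmdDispatch must not exceed
        // maxComputeWorkGroupCount for that dimension.
        if (group_counts[i] > limits.maxComputeWorkGroupCount[i]) {
            group_counts[i] = limits.maxComputeWorkGroupCount[i];
        }
    }
}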
TEST_F(VkLayerTest, MultiplaneImageLayoutBadAspectFlags) { TEST_DESCRIPTION("Query layout of a multiplane image using illegal aspect flag masks"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image_2plane, image_3plane; ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; VkResult err = vkCreateImage(device(), &ci, NULL, &image_2plane); ASSERT_VK_SUCCESS(err); ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; err = vkCreateImage(device(), &ci, NULL, &image_3plane); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane, for a 2-plane image VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01581"); vkGetImageSubresourceLayout(device(), image_2plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Query layout using color aspect, for a 3-plane image subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01582"); vkGetImageSubresourceLayout(device(), image_3plane, &subres, &layout); m_errorMonitor->VerifyFound(); // Clean up vkDestroyImage(device(), image_2plane, NULL); vkDestroyImage(device(), image_3plane, NULL); } TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) { TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_LINEAR; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; VkResult err = vkCreateImage(device(), &ci, NULL, &image); ASSERT_VK_SUCCESS(err); // Query layout of 3rd plane VkImageSubresource subres = {}; subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; subres.mipLevel = 0; subres.arrayLayer = 0; VkSubresourceLayout layout = {}; m_errorMonitor->ExpectSuccess(); vkGetImageSubresourceLayout(device(), image, &subres, &layout); m_errorMonitor->VerifyNotFound(); vkDestroyImage(device(), image, NULL); } TEST_F(VkLayerTest, DescriptorSetNotUpdated) { TEST_DESCRIPTION("Bind a descriptor set that hasn't been updated."); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " bound but it was never updated. "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); // We shouldn't need a fragment shader but add it to be able to run // on more devices VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidBufferViewObject) { // Create a single TEXEL_BUFFER descriptor and send it an invalid bufferView // First, cause the bufferView to be invalid due to underlying buffer being destroyed // Then destroy view itself and verify that same error is hit VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo 
ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a valid bufferView to start with VkBuffer buffer; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory buffer_memory; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); memory_info.allocationSize = memory_reqs.size; bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); VkBufferView view; VkBufferViewCreateInfo bvci = {}; bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; bvci.buffer = buffer; bvci.format = VK_FORMAT_R32_SFLOAT; bvci.range = VK_WHOLE_SIZE; err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view); ASSERT_VK_SUCCESS(err); // First Destroy buffer underlying view which should hit error in CV vkDestroyBuffer(m_device->device(), buffer, NULL); VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; descriptor_write.pTexelBufferView = &view; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Now destroy view itself and verify same error, which is hit in PV this time vkDestroyBufferView(m_device->device(), view, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), buffer_memory, NULL); vkDestroyDescriptorPool(m_device->device(), 
ds_pool, NULL); } TEST_F(VkLayerTest, CreateBufferViewNoMemoryBoundToBuffer) { TEST_DESCRIPTION("Attempt to create a buffer view with a buffer that has no memory bound to it."); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); ASSERT_NO_FATAL_FAILURE(Init()); // Create a buffer with no bound memory and then attempt to create // a buffer view. VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkBufferViewCreateInfo buff_view_ci = {}; buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO; buff_view_ci.buffer = buffer; buff_view_ci.format = VK_FORMAT_R8_UNORM; buff_view_ci.range = VK_WHOLE_SIZE; VkBufferView buff_view; err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), buffer, NULL); // If last error is success, it still created the view, so delete it. if (err == VK_SUCCESS) { vkDestroyBufferView(m_device->device(), buff_view, NULL); } } TEST_F(VkLayerTest, InvalidDynamicOffsetCases) { // Create a descriptorSet w/ dynamic descriptor and then hit 3 offset error // cases: // 1. No dynamicOffset supplied // 2. Too many dynamicOffsets supplied // 3. Dynamic offset oversteps buffer being updated VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " requires 1 dynamicOffsets, but only 0 dynamicOffsets are left in pDynamicOffsets "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); // 
Allocate memory and bind to buffer so we can make it to the appropriate error VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), dyub, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub, mem, 0); ASSERT_VK_SUCCESS(err); // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0, NULL); m_errorMonitor->VerifyFound(); uint32_t pDynOff[2] = {512, 756}; // Now cause error b/c too many dynOffsets in array for # of dyn descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attempting to bind 1 descriptorSets with 1 dynamic descriptors, but "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 2, pDynOff); m_errorMonitor->VerifyFound(); // Finally cause error due to dynamicOffset being too big m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " dynamic offset 512 combined with offset 0 and range 1024 that oversteps the buffer size of 1024"); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset size of 512 will overstep buffer // /w range 1024 & size 1024 vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 1, pDynOff); m_commandBuffer->Draw(1, 0, 0, 
0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, DescriptorBufferUpdateNoMemoryBound) { TEST_DESCRIPTION("Attempt to update a descriptor with a non-sparse buffer that doesn't have memory bound"); VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); // Create a buffer to update the descriptor with uint32_t qfi = 0; VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffCI.queueFamilyIndexCount = 1; buffCI.pQueueFamilyIndices = &qfi; VkBuffer dyub; err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub); ASSERT_VK_SUCCESS(err); // Attempt to update descriptor without binding memory to it VkDescriptorBufferInfo buffInfo = {}; buffInfo.buffer = dyub; buffInfo.offset = 0; buffInfo.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = descriptorSet; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = &buffInfo; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), dyub, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, InvalidPushConstants) { ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineLayout pipeline_layout; VkPushConstantRange pc_range = {}; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = 
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pushConstantRangeCount = 1; pipeline_layout_ci.pPushConstantRanges = &pc_range; // // Check for invalid push constant ranges in pipeline layouts. // struct PipelineLayoutTestCase { VkPushConstantRange const range; char const *msg; }; const uint32_t too_big = m_device->props.limits.maxPushConstantsSize + 0x4; const std::array<PipelineLayoutTestCase, 10> range_tests = {{ {{VK_SHADER_STAGE_VERTEX_BIT, 0, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."}, {{VK_SHADER_STAGE_VERTEX_BIT, 0, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."}, {{VK_SHADER_STAGE_VERTEX_BIT, 4, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."}, {{VK_SHADER_STAGE_VERTEX_BIT, 4, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."}, {{VK_SHADER_STAGE_VERTEX_BIT, 1, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset 1. Offset must"}, {{VK_SHADER_STAGE_VERTEX_BIT, 0, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, too_big, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, too_big, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, 0xFFFFFFF0, 0x00000020}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, {{VK_SHADER_STAGE_VERTEX_BIT, 0x00000020, 0xFFFFFFF0}, "vkCreatePipelineLayout() call has push constants index 0 with offset "}, }}; // Check for invalid offset and size for (const auto &iter : range_tests) { pc_range = iter.range; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } // Check for invalid stage flag pc_range.offset = 0; pc_range.size = 16; pc_range.stageFlags = 0; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreatePipelineLayout: value of pCreateInfo->pPushConstantRanges[0].stageFlags must not be 0"); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); // Check for duplicate stage flags in a list of push constant ranges. // A shader can only have one push constant block and that block is mapped // to the push constant range that has that shader's stage flag set. // The shader's stage flag can only appear once in all the ranges, so the // implementation can find the one and only range to map it to. const uint32_t ranges_per_test = 5; struct DuplicateStageFlagsTestCase { VkPushConstantRange const ranges[ranges_per_test]; std::vector<char const *> const msg; }; // Overlapping ranges are OK, but a stage flag can appear only once. 
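    // Each case below supplies one expected message per duplicated pair of stage
    // flags: five ranges all using VK_SHADER_STAGE_VERTEX_BIT give C(5,2) = 10
    // offending pairs (ten messages), the second case has two duplicated stages
    // (two messages), and the third has a single duplicated stage (one message).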
const std::array<DuplicateStageFlagsTestCase, 3> duplicate_stageFlags_tests = { { {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 1.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 2.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 2.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 4.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 3 and 4.", }}, {{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}, {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.", "vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.", }}, {{{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4}, {VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}, {VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}}, { "vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.", }}, }, }; for (const auto &iter : duplicate_stageFlags_tests) { pipeline_layout_ci.pPushConstantRanges = iter.ranges; pipeline_layout_ci.pushConstantRangeCount = ranges_per_test; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg.begin(), iter.msg.end()); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } // // CmdPushConstants tests // // Setup a pipeline layout with ranges: [0,32) [16,80) const std::vector<VkPushConstantRange> pc_range2 = {{VK_SHADER_STAGE_VERTEX_BIT, 16, 64}, {VK_SHADER_STAGE_FRAGMENT_BIT, 0, 32}}; const VkPipelineLayoutObj pipeline_layout_obj(m_device, {}, pc_range2); const uint8_t dummy_values[100] = {}; m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Check for invalid stage flag // Note that VU 00996 isn't reached due to parameter validation m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdPushConstants: value of stageFlags must not be 0"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), 0, 0, 16, dummy_values); m_errorMonitor->VerifyFound(); // Positive tests for the overlapping ranges m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, dummy_values); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 32, 48, dummy_values); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16, dummy_values); m_errorMonitor->VerifyNotFound(); // Wrong cmd stages for extant range // No range for all cmd stages -- 
"VUID-vkCmdPushConstants-offset-01795" VUID-vkCmdPushConstants-offset-01795 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); // Missing cmd stages for found overlapping range -- "VUID-vkCmdPushConstants-offset-01796" VUID-vkCmdPushConstants-offset-01796 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_GEOMETRY_BIT, 0, 16, dummy_values); m_errorMonitor->VerifyFound(); // Wrong no extant range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 80, 4, dummy_values); m_errorMonitor->VerifyFound(); // Wrong overlapping extent m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 20, dummy_values); m_errorMonitor->VerifyFound(); // Wrong stage flags for valid overlapping range m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796"); vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 16, 16, dummy_values); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DescriptorSetCompatibility) { // Test various desriptorSet errors with bad binding combinations using std::vector; VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const uint32_t NUM_DESCRIPTOR_TYPES = 5; VkDescriptorPoolSize ds_type_count[NUM_DESCRIPTOR_TYPES] = {}; ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count[0].descriptorCount = 10; ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; ds_type_count[1].descriptorCount = 2; ds_type_count[2].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; ds_type_count[2].descriptorCount = 2; ds_type_count[3].type = VK_DESCRIPTOR_TYPE_SAMPLER; ds_type_count[3].descriptorCount = 5; // TODO : LunarG ILO driver currently asserts in desc.c w/ INPUT_ATTACHMENT // type // ds_type_count[4].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; ds_type_count[4].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; ds_type_count[4].descriptorCount = 2; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 5; ds_pool_ci.poolSizeCount = NUM_DESCRIPTOR_TYPES; ds_pool_ci.pPoolSizes = ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); static const uint32_t MAX_DS_TYPES_IN_LAYOUT = 2; VkDescriptorSetLayoutBinding dsl_binding[MAX_DS_TYPES_IN_LAYOUT] = {}; dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding[0].descriptorCount = 5; dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[0].pImmutableSamplers = NULL; // Create layout identical to set0 layout but w/ different stageFlags VkDescriptorSetLayoutBinding dsl_fs_stage_only = {}; dsl_fs_stage_only.binding = 0; dsl_fs_stage_only.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; 
dsl_fs_stage_only.descriptorCount = 5; dsl_fs_stage_only.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // Different stageFlags to cause error at // bind time dsl_fs_stage_only.pImmutableSamplers = NULL; vector<VkDescriptorSetLayoutObj> ds_layouts; // Create 4 unique layouts for full pipelineLayout, and 1 special fs-only // layout for error case ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const VkDescriptorSetLayoutObj ds_layout_fs_only(m_device, {dsl_fs_stage_only}); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; dsl_binding[0].descriptorCount = 2; dsl_binding[1].binding = 1; dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dsl_binding[1].descriptorCount = 2; dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL; dsl_binding[1].pImmutableSamplers = NULL; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>({dsl_binding[0], dsl_binding[1]})); dsl_binding[0].binding = 0; dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dsl_binding[0].descriptorCount = 5; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dsl_binding[0].descriptorCount = 2; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0])); const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); static const uint32_t NUM_SETS = 4; VkDescriptorSet descriptorSet[NUM_SETS] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = ds_vk_layouts.size(); alloc_info.pSetLayouts = ds_vk_layouts.data(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptorSet); ASSERT_VK_SUCCESS(err); VkDescriptorSet ds0_fs_only = {}; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout_fs_only.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &ds0_fs_only); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layouts[0], &ds_layouts[1]}); // Create pipelineLayout with only one setLayout const VkPipelineLayoutObj single_pipe_layout(m_device, {&ds_layouts[0]}); // Create pipelineLayout with 2 descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_one_desc(m_device, {&ds_layouts[3]}); // Create pipelineLayout with 5 SAMPLER descriptor setLayout at index 0 const VkPipelineLayoutObj pipe_layout_five_samp(m_device, {&ds_layouts[2]}); // Create pipelineLayout with UB type, but stageFlags for FS only VkPipelineLayoutObj pipe_layout_fs_only(m_device, {&ds_layout_fs_only}); // Create pipelineLayout w/ incompatible set0 layout, but set1 is fine const VkPipelineLayoutObj pipe_layout_bad_set0(m_device, {&ds_layout_fs_only, &ds_layouts[1]}); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); 
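    // Note: the pipeline is created against pipe_layout_fs_only (just below), so the
    // draw-time compatibility checks later in this test compare the bound descriptor
    // sets against that FS-only layout, not against whichever layout was passed to
    // vkCmdBindDescriptorSets.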
pipe.CreateVKPipeline(pipe_layout_fs_only.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // TODO : Want to cause various binding incompatibility issues here to test // DrawState // First cause various verify_layout_compatibility() fails // Second disturb early and late sets and verify INFO msgs // verify_set_layout_compatibility fail cases: // 1. invalid VkPipelineLayout (layout) passed into vkCmdBindDescriptorSets m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-layout-parameter"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, (VkPipelineLayout)((size_t)0xbaadb1be), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 2. layoutIndex exceeds # of layouts in layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " attempting to bind set to index 1"); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, single_pipe_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 3. Pipeline setLayout[0] has 2 descriptors, but set being bound has 5 // descriptors m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has 2 descriptors, but DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_one_desc.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 4. same # of descriptors but mismatch in type m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is type 'VK_DESCRIPTOR_TYPE_SAMPLER' but binding "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_five_samp.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // 5. same # of descriptors but mismatch in stageFlags m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has stageFlags 16 but binding 0 for DescriptorSetLayout "); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_fs_only.handle(), 0, 1, &descriptorSet[0], 0, NULL); m_errorMonitor->VerifyFound(); // Now that we're done actively using the pipelineLayout that gfx pipeline // was created with, we should be able to delete it. Do that now to verify // that validation obeys pipelineLayout lifetime pipe_layout_fs_only.Reset(); // Cause draw-time errors due to PSO incompatibilities // 1. Error due to not binding required set (we actually use same code as // above to disturb set0) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_bad_set0.handle(), 1, 1, &descriptorSet[1], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " uses set #0 but that set is not bound."); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // 2. 
Error due to bound set not being compatible with PSO's // VkPipelineLayout (diff stageFlags in this case) vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2, &descriptorSet[0], 0, NULL); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " bound as set #0 is not compatible with "); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Remaining clean-up m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } TEST_F(VkLayerTest, NoBeginCommandBuffer) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer() before this call to "); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj commandBuffer(m_device, m_commandPool); // Call EndCommandBuffer() w/o calling BeginCommandBuffer() vkEndCommandBuffer(commandBuffer.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferNullRenderpass) { ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBufferObj cb(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); // Force the failure by not setting the Renderpass and Framebuffer fields VkCommandBufferInheritanceInfo cmd_buf_hinfo = {}; cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; VkCommandBufferBeginInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cmd_buf_info.pNext = NULL; cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCommandBufferBeginInfo-flags-00053"); vkBeginCommandBuffer(cb.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedExplicitReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); secondary.begin(); secondary.end(); m_commandBuffer->begin(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); // rerecording of secondary secondary.reset(); // explicit reset here. secondary.begin(); secondary.end(); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedNoReset) { ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded"); // A pool we can reset in. 
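    // Same scenario as the explicit-reset test above, except the rerecord relies on the
    // implicit reset performed by begin() (legal only because the pool was created with
    // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); the primary that executed the
    // original recording is still invalidated.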
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    secondary.begin();
    secondary.end();

    m_commandBuffer->begin();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());

    // rerecording of secondary
    secondary.begin();  // implicit reset in begin
    secondary.end();

    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CascadedInvalidation) {
    ASSERT_NO_FATAL_FAILURE(Init());

    VkEventCreateInfo eci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0};
    VkEvent event;
    vkCreateEvent(m_device->device(), &eci, nullptr, &event);

    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    secondary.begin();
    vkCmdSetEvent(secondary.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
    secondary.end();

    m_commandBuffer->begin();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_commandBuffer->end();

    // destroying the event should invalidate both primary and secondary CB
    vkDestroyEvent(m_device->device(), event, nullptr);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "invalid because bound Event");
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CommandBufferResetErrors) {
    // Cause error due to Begin while recording CB
    // Then cause 2 errors for attempting to reset CB w/o having
    // VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set for the pool from
    // which CBs were allocated. Note that this bit is off by default.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call Begin on command buffer");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Calls AllocateCommandBuffers
    VkCommandBufferObj commandBuffer(m_device, m_commandPool);

    // Plain one-time-submit begin info; the inheritance info is boilerplate here
    // (it is ignored for a primary command buffer)
    VkCommandBufferInheritanceInfo cmd_buf_hinfo = {};
    cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    VkCommandBufferBeginInfo cmd_buf_info = {};
    cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cmd_buf_info.pNext = NULL;
    cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo;

    // Begin CB to transition to recording state
    vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info);
    // Can't re-begin.
This should trigger error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetCommandBuffer-commandBuffer-00046"); VkCommandBufferResetFlags flags = 0; // Don't care about flags for this test // Reset attempt will trigger error due to incorrect CommandPool state vkResetCommandBuffer(commandBuffer.handle(), flags); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBeginCommandBuffer-commandBuffer-00050"); // Transition CB to RECORDED state vkEndCommandBuffer(commandBuffer.handle()); // Now attempting to Begin will implicitly reset, which triggers error vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPipelineCreateState) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: Vertex Shader required"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptorSet; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet); ASSERT_VK_SUCCESS(err); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); VkPipelineRasterizationStateCreateInfo rs_state_ci = {}; rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci.polygonMode = VK_POLYGON_MODE_FILL; rs_state_ci.cullMode = VK_CULL_MODE_BACK_BIT; rs_state_ci.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rs_state_ci.depthClampEnable = VK_FALSE; rs_state_ci.rasterizerDiscardEnable = VK_TRUE; rs_state_ci.depthBiasEnable = VK_FALSE; rs_state_ci.lineWidth = 1.0f; VkPipelineVertexInputStateCreateInfo vi_ci = {}; vi_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vi_ci.pNext = nullptr; vi_ci.vertexBindingDescriptionCount = 0; vi_ci.pVertexBindingDescriptions = nullptr; vi_ci.vertexAttributeDescriptionCount = 0; vi_ci.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo ia_ci = {}; ia_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineShaderStageCreateInfo shaderStages[2]; memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj 
fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    shaderStages[0] = fs.GetStageCreateInfo();  // should be: vs.GetStageCreateInfo();
    shaderStages[1] = fs.GetStageCreateInfo();

    VkGraphicsPipelineCreateInfo gp_ci = {};
    gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    gp_ci.pViewportState = nullptr;  // no viewport b/c rasterizer is disabled
    gp_ci.pRasterizationState = &rs_state_ci;
    gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
    gp_ci.layout = pipeline_layout.handle();
    gp_ci.renderPass = renderPass();
    gp_ci.pVertexInputState = &vi_ci;
    gp_ci.pInputAssemblyState = &ia_ci;
    gp_ci.stageCount = 1;
    gp_ci.pStages = shaderStages;

    VkPipelineCacheCreateInfo pc_ci = {};
    pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    pc_ci.initialDataSize = 0;
    pc_ci.pInitialData = 0;

    VkPipeline pipeline;
    VkPipelineCache pipelineCache;
    err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache);
    ASSERT_VK_SUCCESS(err);

    err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline);
    m_errorMonitor->VerifyFound();

    vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable) {
    // Enable sample shading in pipeline when the feature is disabled.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Disable sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    device_features.sampleRateShading = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Cause the error by enabling sample shading...
    auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                      "VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784");
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureEnable) {
    // Enable sample shading in the pipeline when the feature is enabled, and check the minSampleShading bounds.
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    // Require sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (device_features.sampleRateShading == VK_FALSE) {
        printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    auto range_test = [this](float value, bool positive_test) {
        auto info_override = [value](CreatePipelineHelper &helper) {
            helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE;
            helper.pipe_ms_state_ci_.minSampleShading = value;
        };
        CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                          "VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786", positive_test);
    };

    range_test(NearestSmaller(0.0F), false);
    range_test(NearestGreater(1.0F), false);
    range_test(0.0F, /* positive_test= */ true);
    range_test(1.0F, /* positive_test= */ true);
}

TEST_F(VkLayerTest, InvalidPipelineSamplePNext) {
    // Chain valid extension structs into VkPipelineMultisampleStateCreateInfo::pNext, then chain an invalid struct.
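    // The chain helper below appends each extension struct to the multisample-state
    // pNext chain only when the corresponding device extension is reported, so
    // extension_head may legitimately be null, in which case the positive test is skipped.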
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Set up the extension structs auto sampleLocations = chain_util::Init<VkPipelineSampleLocationsStateCreateInfoEXT>(); auto coverageToColor = chain_util::Init<VkPipelineCoverageToColorStateCreateInfoNV>(); auto coverageModulation = chain_util::Init<VkPipelineCoverageModulationStateCreateInfoNV>(); auto discriminatrix = [this](const char *name) { return DeviceExtensionSupported(gpu(), nullptr, name); }; chain_util::ExtensionChain chain(discriminatrix, &m_device_extension_names); chain.Add(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, sampleLocations); chain.Add(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME, coverageToColor); chain.Add(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, coverageModulation); const void *extension_head = chain.Head(); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (extension_head) { auto good_chain = [extension_head](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = extension_head; }; CreatePipelineHelper::OneshotTest(*this, good_chain, (VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT), "No error", true); } else { printf("%s Required extension not present -- skipping positive checks.\n", kSkipPrefix); } auto instance_ci = chain_util::Init<VkInstanceCreateInfo>(); auto bad_chain = [&instance_ci](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = &instance_ci; }; CreatePipelineHelper::OneshotTest(*this, bad_chain, VK_DEBUG_REPORT_WARNING_BIT_EXT, "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext"); } /*// TODO : This test should be good, but needs Tess support in compiler to run TEST_F(VkLayerTest, InvalidPatchControlPoints) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH primitive "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), VK_DESCRIPTOR_POOL_USAGE_NON_FREE, 1, &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = {}; ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO; ds_layout_ci.pNext = NULL; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dsl_binding; VkDescriptorSetLayout ds_layout; err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorSet descriptorSet; err = vkAllocateDescriptorSets(m_device->device(), ds_pool, VK_DESCRIPTOR_SET_USAGE_NON_FREE, 1, &ds_layout, &descriptorSet); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = 
&ds_layout; VkPipelineLayout pipeline_layout; err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); ASSERT_VK_SUCCESS(err); VkPipelineShaderStageCreateInfo shaderStages[3]; memset(&shaderStages, 0, 3 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(m_device,bindStateVertShaderText,VK_SHADER_STAGE_VERTEX_BIT, this); // Just using VS txt for Tess shaders as we don't care about functionality VkShaderObj tc(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj te(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); shaderStages[0].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; shaderStages[0].shader = vs.handle(); shaderStages[1].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[1].stage = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT; shaderStages[1].shader = tc.handle(); shaderStages[2].sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO; shaderStages[2].stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; shaderStages[2].shader = te.handle(); VkPipelineInputAssemblyStateCreateInfo iaCI = {}; iaCI.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; iaCI.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; VkPipelineTessellationStateCreateInfo tsCI = {}; tsCI.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO; tsCI.patchControlPoints = 0; // This will cause an error VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.pNext = NULL; gp_ci.stageCount = 3; gp_ci.pStages = shaderStages; gp_ci.pVertexInputState = NULL; gp_ci.pInputAssemblyState = &iaCI; gp_ci.pTessellationState = &tsCI; gp_ci.pViewportState = NULL; gp_ci.pRasterizationState = NULL; gp_ci.pMultisampleState = NULL; gp_ci.pDepthStencilState = NULL; gp_ci.pColorBlendState = NULL; gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci.layout = pipeline_layout; gp_ci.renderPass = renderPass(); VkPipelineCacheCreateInfo pc_ci = {}; pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; pc_ci.pNext = NULL; pc_ci.initialSize = 0; pc_ci.initialData = 0; pc_ci.maxSize = 0; VkPipeline pipeline; VkPipelineCache pipelineCache; err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache); ASSERT_VK_SUCCESS(err); err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL); vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); } */ TEST_F(VkLayerTest, PSOViewportStateTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for non-multiViewport"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto break_vp_state = [](CreatePipelineHelper &helper) { helper.rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; helper.gp_ci_.pViewportState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp_state, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750"); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 
0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; // test viewport and scissor arrays using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, nullptr, 1, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {1, viewports, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {1, nullptr, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, 
scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } // Set Extension dynamic states without enabling the required Extensions. 
TEST_F(VkLayerTest, ExtensionDynamicStatesSetWOExtensionEnabled) { TEST_DESCRIPTION("Create a graphics pipeline with Extension dynamic states without enabling the required Extensions."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); using std::vector; struct TestCase { uint32_t dynamic_state_count; VkDynamicState dynamic_state; char const *errmsg; }; vector<TestCase> dyn_test_cases = { {1, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, "contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but VK_NV_clip_space_w_scaling"}, {1, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, "contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but VK_EXT_discard_rectangles"}, {1, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, "contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but VK_EXT_sample_locations"}, }; for (const auto &test_case : dyn_test_cases) { VkDynamicState state[1]; state[0] = test_case.dynamic_state; const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = test_case.dynamic_state_count; dyn_state_ci.pDynamicStates = state; helper.dyn_state_ci_ = dyn_state_ci; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.errmsg); } } TEST_F(VkLayerTest, PSOViewportStateMultiViewportTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for multiViewport feature"); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } // at least 16 viewports supported from here on ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {2, nullptr, 2, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {2, viewports, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; const auto max_viewports = m_device->phy().properties().limits.maxViewports; const bool max_viewports_maxxed = max_viewports == std::numeric_limits<decltype(max_viewports)>::max(); if (max_viewports_maxxed) { printf("%s VkPhysicalDeviceLimits::maxViewports is UINT32_MAX -- skipping part of test requiring to exceed maxViewports.\n", kSkipPrefix); } else { 
const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}); test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); test_cases.push_back( {too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); } for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; if (!max_viewports_maxxed) { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr dyn_test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219"}}); } const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = {}; dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, 
VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids); } } TEST_F(VkLayerTest, DynViewportAndScissorUndefinedDrawState) { TEST_DESCRIPTION("Test viewport and scissor dynamic state that is not set before draw"); ASSERT_NO_FATAL_FAILURE(Init()); // TODO: should also test on !multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiple viewports/scissors; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineObj pipeline_dyn_vp(m_device); pipeline_dyn_vp.AddShader(&vs); pipeline_dyn_vp.AddShader(&fs); pipeline_dyn_vp.AddDefaultColorAttachment(); pipeline_dyn_vp.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT); pipeline_dyn_vp.SetScissor(m_scissors); ASSERT_VK_SUCCESS(pipeline_dyn_vp.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); VkPipelineObj pipeline_dyn_sc(m_device); pipeline_dyn_sc.AddShader(&vs); pipeline_dyn_sc.AddShader(&fs); pipeline_dyn_sc.AddDefaultColorAttachment(); pipeline_dyn_sc.SetViewport(m_viewports); pipeline_dyn_sc.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR); ASSERT_VK_SUCCESS(pipeline_dyn_sc.CreateVKPipeline(pipeline_layout.handle(), m_renderPass)); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic viewport(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_vp.handle()); vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, &m_viewports[0]); // Forgetting to set needed 0th viewport (PSO viewportCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, "); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_sc.handle()); vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, &m_scissors[0]); // Forgetting to set needed 0th scissor (PSO scissorCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PSOLineWidthInvalid) { TEST_DESCRIPTION("Test non-1.0 lineWidth errors when pipeline is created and in vkCmdSetLineWidth"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo shader_state_cis[] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vi_state_ci = {}; vi_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo ia_state_ci = {}; ia_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; ia_state_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo vp_state_ci = {}; vp_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; 
vp_state_ci.viewportCount = 1; vp_state_ci.pViewports = &viewport; vp_state_ci.scissorCount = 1; vp_state_ci.pScissors = &scissor; VkPipelineRasterizationStateCreateInfo rs_state_ci = {}; rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_state_ci.rasterizerDiscardEnable = VK_FALSE; // lineWidth to be set by checks VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; // must match subpass att. VkPipelineColorBlendAttachmentState cba_state = {}; VkPipelineColorBlendStateCreateInfo cb_state_ci = {}; cb_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; cb_state_ci.attachmentCount = 1; // must match count in subpass cb_state_ci.pAttachments = &cba_state; const VkPipelineLayoutObj pipeline_layout(m_device); VkGraphicsPipelineCreateInfo gp_ci = {}; gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; gp_ci.stageCount = sizeof(shader_state_cis) / sizeof(VkPipelineShaderStageCreateInfo); gp_ci.pStages = shader_state_cis; gp_ci.pVertexInputState = &vi_state_ci; gp_ci.pInputAssemblyState = &ia_state_ci; gp_ci.pViewportState = &vp_state_ci; gp_ci.pRasterizationState = &rs_state_ci; gp_ci.pMultisampleState = &ms_state_ci; gp_ci.pColorBlendState = &cb_state_ci; gp_ci.layout = pipeline_layout.handle(); gp_ci.renderPass = renderPass(); gp_ci.subpass = 0; const std::vector<float> test_cases = {-1.0f, 0.0f, NearestSmaller(1.0f), NearestGreater(1.0f), NAN}; // test VkPipelineRasterizationStateCreateInfo::lineWidth for (const auto test_case : test_cases) { rs_state_ci.lineWidth = test_case; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), VK_NULL_HANDLE, 1, &gp_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // test vkCmdSetLineWidth m_commandBuffer->begin(); for (const auto test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetLineWidth-lineWidth-00788"); vkCmdSetLineWidth(m_commandBuffer->handle(), test_case); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_binding_00618) { TEST_DESCRIPTION( "Test VUID-VkVertexInputBindingDescription-binding-00618: binding must be less than " "VkPhysicalDeviceLimits::maxVertexInputBindings"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings. 
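    // Valid binding numbers run from 0 through maxVertexInputBindings - 1, so using the
    // limit itself below is the smallest out-of-range value.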
VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-binding-00618"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_stride_00619) { TEST_DESCRIPTION( "Test VUID-VkVertexInputBindingDescription-stride-00619: stride must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputBindingStride"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = 
vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when stride is greater than VkPhysicalDeviceLimits::maxVertexInputBindingStride. VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 0; vertex_input_state.pVertexAttributeDescriptions = nullptr; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-stride-00619"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, 
nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_location_00620) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-location-00620: location must be less than " "VkPhysicalDeviceLimits::maxVertexInputAttributes"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when location is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.location = m_device->props.limits.maxVertexInputAttributes; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = 
&rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-location-00620"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_binding_00621) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-binding-00621: binding must be less than " "VkPhysicalDeviceLimits::maxVertexInputBindings"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings. VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.binding = m_device->props.limits.maxVertexInputBindings; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 0; vertex_input_state.pVertexBindingDescriptions = nullptr; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkPipelineViewportStateCreateInfo viewport_state{}; viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; viewport_state.viewportCount = 1; viewport_state.pViewports = &viewport; viewport_state.scissorCount = 1; viewport_state.pScissors = &scissor; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_FALSE; rasterization_state.depthBiasEnable = VK_FALSE; 
rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = &viewport_state; create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-binding-00621"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_offset_00622) { TEST_DESCRIPTION( "Test VUID-VkVertexInputAttributeDescription-offset-00622: offset must be less than or equal to " "VkPhysicalDeviceLimits::maxVertexInputAttributeOffset"); EnableDeviceProfileLayer(); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); uint32_t maxVertexInputAttributeOffset = 0; { VkPhysicalDeviceProperties device_props = {}; vkGetPhysicalDeviceProperties(gpu(), &device_props); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; if (maxVertexInputAttributeOffset == 0xFFFFFFFF) { // Attempt to artificially lower maximum offset PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); if (!fpvkSetPhysicalDeviceLimitsEXT) { printf("%s All offsets are valid & device_profile_api not found; skipped.\n", kSkipPrefix); return; } device_props.limits.maxVertexInputAttributeOffset = device_props.limits.maxVertexInputBindingStride - 2; fpvkSetPhysicalDeviceLimitsEXT(gpu(), &device_props.limits); maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset; } } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCache pipeline_cache; { VkPipelineCacheCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache); ASSERT_VK_SUCCESS(err); } VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineShaderStageCreateInfo stages[2]{{}}; stages[0] = vs.GetStageCreateInfo(); stages[1] = fs.GetStageCreateInfo(); VkVertexInputBindingDescription vertex_input_binding_description{}; vertex_input_binding_description.binding = 0; vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride; vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; // Test when offset is greater than maximum. 
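    // Note: maxVertexInputAttributeOffset is either the device's real limit or the value
    // artificially lowered through the device_profile_api layer above, so offsetting by +1
    // below is guaranteed to land out of range.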
VkVertexInputAttributeDescription vertex_input_attribute_description{}; vertex_input_attribute_description.format = VK_FORMAT_R8_UNORM; vertex_input_attribute_description.offset = maxVertexInputAttributeOffset + 1; VkPipelineVertexInputStateCreateInfo vertex_input_state{}; vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; vertex_input_state.pNext = nullptr; vertex_input_state.vertexBindingDescriptionCount = 1; vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description; vertex_input_state.vertexAttributeDescriptionCount = 1; vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description; VkPipelineInputAssemblyStateCreateInfo input_assembly_state{}; input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineMultisampleStateCreateInfo multisample_state{}; multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; multisample_state.pNext = nullptr; multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; multisample_state.sampleShadingEnable = 0; multisample_state.minSampleShading = 1.0; multisample_state.pSampleMask = nullptr; VkPipelineRasterizationStateCreateInfo rasterization_state{}; rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state.polygonMode = VK_POLYGON_MODE_FILL; rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE; rasterization_state.depthClampEnable = VK_FALSE; rasterization_state.rasterizerDiscardEnable = VK_TRUE; rasterization_state.depthBiasEnable = VK_FALSE; rasterization_state.lineWidth = 1.0f; const VkPipelineLayoutObj pipeline_layout(m_device); { VkGraphicsPipelineCreateInfo create_info{}; create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; create_info.stageCount = 2; create_info.pStages = stages; create_info.pVertexInputState = &vertex_input_state; create_info.pInputAssemblyState = &input_assembly_state; create_info.pViewportState = nullptr; // no viewport b/c rasterizer is disabled create_info.pMultisampleState = &multisample_state; create_info.pRasterizationState = &rasterization_state; create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; create_info.layout = pipeline_layout.handle(); create_info.renderPass = renderPass(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-offset-00622"); VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr); } TEST_F(VkLayerTest, NullRenderPass) { // Bind a NULL RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBeginRenderPass: required parameter pRenderPassBegin specified as NULL"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Don't care about RenderPass handle b/c error should be flagged before // that vkCmdBeginRenderPass(m_commandBuffer->handle(), NULL, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, RenderPassWithinRenderPass) { // Bind a BeginRenderPass within an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside 
an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Just create a dummy Renderpass that's non-NULL so we can get to the // proper error vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, RenderPassClearOpMismatch) { TEST_DESCRIPTION( "Begin a renderPass where clearValueCount is less than the number of renderPass attachments that use " "loadOpVK_ATTACHMENT_LOAD_OP_CLEAR."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Create a renderPass with a single attachment that uses loadOp CLEAR VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM; // Set loadOp to CLEAR attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.renderPass = VK_NULL_HANDLE; hinfo.subpass = 0; hinfo.framebuffer = VK_NULL_HANDLE; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; VkCommandBufferBeginInfo info = {}; info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.pInheritanceInfo = &hinfo; vkBeginCommandBuffer(m_commandBuffer->handle(), &info); VkRenderPassBeginInfo rp_begin = {}; rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rp_begin.pNext = NULL; rp_begin.renderPass = renderPass(); rp_begin.framebuffer = framebuffer(); rp_begin.clearValueCount = 0; // Should be 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassBeginInfo-clearValueCount-00902"); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rp_begin, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, EndCommandBufferWithinRenderPass) { TEST_DESCRIPTION("End a command buffer with an active render pass"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); // End command buffer properly to avoid driver issues. This is safe -- the // previous vkEndCommandBuffer should not have reached the driver. 
m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // TODO: Add test for VK_COMMAND_BUFFER_LEVEL_SECONDARY // TODO: Add test for VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT } TEST_F(VkLayerTest, FillBufferWithinRenderPass) { // Call CmdFillBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); m_commandBuffer->FillBuffer(dstBuffer.handle(), 0, 4, 0x11111111); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, UpdateBufferWithinRenderPass) { // Call CmdUpdateBuffer within an active renderpass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; VkBufferObj dstBuffer; dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs); VkDeviceSize dstOffset = 0; uint32_t Data[] = {1, 2, 3, 4, 5, 6, 7, 8}; VkDeviceSize dataSize = sizeof(Data) / sizeof(uint32_t); vkCmdUpdateBuffer(m_commandBuffer->handle(), dstBuffer.handle(), dstOffset, dataSize, &Data); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorImageWithBadRange) { TEST_DESCRIPTION("Record clear color with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; 
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearDepthStencilWithBadRange) { TEST_DESCRIPTION("Record clear depth with an invalid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle(); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); const VkImageSubresourceRange range = {ds_aspect, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 1, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 0, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694"); const VkImageSubresourceRange range = {ds_aspect, 0, 2, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 0}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdClearDepthStencilImage-pRanges-01695"); const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 2}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ClearColorImageWithinRenderPass) { // Call CmdClearColorImage within an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "It is invalid to issue this call inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); VkClearColorValue clear_color; memset(clear_color.uint32, 0, sizeof(uint32_t) * 4); VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dstImage; dstImage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT); vkCmdClearColorImage(m_commandBuffer->handle(), dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearDepthStencilImageErrors) { // Hit errors related to vkCmdClearDepthStencilImage() // 1. Use an image that doesn't have VK_IMAGE_USAGE_TRANSFER_DST_BIT set // 2. Call CmdClearDepthStencilImage within an active RenderPass ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } VkClearDepthStencilValue clear_value = {0}; VkMemoryPropertyFlags reqs = 0; VkImageCreateInfo image_create_info = vk_testing::Image::create_info(); image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = depth_format; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Error here is that VK_IMAGE_USAGE_TRANSFER_DST_BIT is excluded for DS image that we'll call Clear on below image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; vk_testing::Image dst_image_bad_usage; dst_image_bad_usage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-image-00009"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image_bad_usage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); // Fix usage for next test case image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image dst_image; dst_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-renderpass"); vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ClearColorAttachmentsOutsideRenderPass) { // Call CmdClearAttachmentss outside of an active RenderPass m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearAttachments(): This call must be issued inside an active render pass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Start no RenderPass m_commandBuffer->begin(); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0; color_attachment.clearValue.color.float32[1] = 0; color_attachment.clearValue.color.float32[2] = 0; color_attachment.clearValue.color.float32[3] = 0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {32, 32}}}; vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RenderPassExcessiveNextSubpass) { TEST_DESCRIPTION("Test that an error is produced when CmdNextSubpass is called too many times in a renderpass instance"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdNextSubpass(): Attempted to advance beyond final subpass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // error here. 
vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, RenderPassEndedBeforeFinalSubpass) { TEST_DESCRIPTION("Test that an error is produced when CmdEndRenderPass is called before the final subpass has been reached"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdEndRenderPass(): Called before reaching final subpass"); ASSERT_NO_FATAL_FAILURE(Init()); VkSubpassDescription sd[2] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}, {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}}; VkRenderPassCreateInfo rcpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rcpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 16, 16, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // no implicit RP begin VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {16, 16}}, 0, nullptr}; vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); // Error here. vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); // Clean up. vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, BufferMemoryBarrierNoBuffer) { // Try to add a buffer memory barrier with no buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pBufferMemoryBarriers[0].buffer specified as VK_NULL_HANDLE"); ASSERT_NO_FATAL_FAILURE(Init()); m_commandBuffer->begin(); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.buffer = VK_NULL_HANDLE; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidBarriers) { TEST_DESCRIPTION("A variety of ways to get VK_INVALID_BARRIER "); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } // Add a token self-dependency for this test to avoid unexpected errors m_addRenderPassSelfDependency = true; ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); // Use image unbound to memory in barrier m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindImageMemory()");
    vk_testing::Image unbound_image;
    auto unbound_image_info = vk_testing::Image::create_info();
    unbound_image_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    unbound_image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
    unbound_image.init_no_mem(*m_device, unbound_image_info);
    auto unbound_subresource = vk_testing::Image::subresource_range(unbound_image_info, VK_IMAGE_ASPECT_COLOR_BIT);
    auto unbound_image_barrier = unbound_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
                                                                    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, unbound_subresource);
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0,
                         nullptr, 0, nullptr, 1, &unbound_image_barrier);
    m_errorMonitor->VerifyFound();

    // Use buffer unbound to memory in barrier
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " used with no memory bound. Memory should be bound by calling vkBindBufferMemory()");
    VkBufferObj unbound_buffer;
    auto unbound_buffer_info = VkBufferObj::create_info(16, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
    unbound_buffer.init_no_mem(*m_device, unbound_buffer_info);
    auto unbound_buffer_barrier = unbound_buffer.buffer_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, 0, 16);
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0,
                         nullptr, 1, &unbound_buffer_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-newLayout-01198");

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.pNext = NULL;
    img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    // New layout can't be UNDEFINED
    img_barrier.newLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier.image = m_renderTargets[0]->handle();
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Transition image to color attachment optimal
    img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);

    // TODO: this looks vestigial or incomplete...
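    // After the stray render-pass begin/end below, the next checks show that duplicated
    // barriers are fine when oldLayout is UNDEFINED (each simply reinitializes the image),
    // but duplicating a real GENERAL -> COLOR_ATTACHMENT_OPTIMAL transition trips
    // VUID-VkImageMemoryBarrier-oldLayout-01197: the second barrier's oldLayout no longer
    // matches the layout the first barrier just established.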
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; // Can't send buffer memory barrier during a render pass vkCmdEndRenderPass(m_commandBuffer->handle()); // Duplicate barriers that change layout img_barrier.image = image.handle(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; VkImageMemoryBarrier img_barriers[2] = {img_barrier, img_barrier}; // Transitions from UNDEFINED are valid, even if duplicated m_errorMonitor->ExpectSuccess(); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyNotFound(); // Duplication of layout transitions (not from undefined) are not valid img_barriers[0].oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barriers[0].newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barriers[1].oldLayout = img_barriers[0].oldLayout; img_barriers[1].newLayout = img_barriers[0].newLayout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 2, img_barriers); m_errorMonitor->VerifyFound(); VkBufferObj buffer; VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; buffer.init_as_src_and_dst(*m_device, 256, mem_reqs); VkBufferMemoryBarrier buf_barrier = {}; buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER; buf_barrier.pNext = NULL; buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; buf_barrier.buffer = buffer.handle(); buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-offset-01187"); // Exceed the buffer size buf_barrier.offset = buffer.create_info().size + 1; // Offset greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); buf_barrier.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-size-01189"); buf_barrier.size = buffer.create_info().size + 1; // Size greater than total size vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Now exercise barrier aspect bit errors, first DS m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207"); VkDepthStencilObj ds_image(m_device); ds_image.Init(m_device, 128, 128, depth_format); ASSERT_TRUE(ds_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = ds_image.handle(); // Not having 
DEPTH or STENCIL set is an error
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Having only one of depth or stencil set for a DS image is an error
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207");
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Having anything other than DEPTH and STENCIL is an error
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
    vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                         &img_barrier);
    m_errorMonitor->VerifyFound();

    // Now test depth-only
    VkFormatProperties format_props;
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &format_props);
    if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        VkDepthStencilObj d_image(m_device);
        d_image.Init(m_device, 128, 128, VK_FORMAT_D16_UNORM);
        ASSERT_TRUE(d_image.initialized());
        img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
        img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
        img_barrier.image = d_image.handle();

        // DEPTH bit must be set
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.");
        img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
        vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                             &img_barrier);
        m_errorMonitor->VerifyFound();

        // No bits other than DEPTH may be set
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.");
        img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
        vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
                             &img_barrier);
        m_errorMonitor->VerifyFound();
    }

    // Now test stencil-only
    vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &format_props);
    if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
        VkDepthStencilObj s_image(m_device);
        s_image.Init(m_device, 128, 128, VK_FORMAT_S8_UINT);
        ASSERT_TRUE(s_image.initialized());
        img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
        img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
        img_barrier.image = s_image.handle();
        // Using the COLOR aspect on a stencil-only image is an error
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "Stencil-only image formats must have the 
VK_IMAGE_ASPECT_STENCIL_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Finally test color VkImageObj c_image(m_device); c_image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(c_image.initialized()); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = c_image.handle(); // COLOR bit must be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // No bits other than COLOR may be set m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set."); img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); // A barrier's new and old VkImageLayout must be compatible with an image's VkImageUsageFlags. 
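    // The block below builds a table of (image, disallowed layout, expected VUID) cases:
    // each image is created with exactly one usage bit, so the usage-specific layouts it
    // was not created for should be rejected, in both the oldLayout and newLayout
    // positions of the barrier.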
{ VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds(m_device); img_ds.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout bad_layout; std::string msg_code; } bad_buffer_layouts[] = { // clang-format off // images _without_ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT {img_ds, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_src, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_xfer_dst, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_sampled, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, {img_input, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"}, // images _without_ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"}, {img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, {img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"}, // images _without_ VK_IMAGE_USAGE_SAMPLED_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT {img_color, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_ds, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_src, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, {img_xfer_dst, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_SRC_BIT {img_color, 
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"}, // images _without_ VK_IMAGE_USAGE_TRANSFER_DST_BIT {img_color, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_ds, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_sampled, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, {img_input, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"}, // clang-format on }; const uint32_t layout_count = sizeof(bad_buffer_layouts) / sizeof(bad_buffer_layouts[0]); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = bad_buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = bad_buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) : VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.oldLayout = bad_buffer_layouts[i].bad_layout; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = bad_buffer_layouts[i].bad_layout; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; } // Attempt barrier where srcAccessMask is not supported by srcStageMask m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184"); // Have lower-order bit that's supported (shader write), but higher-order bit not supported to verify multi-bit validation buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT; buf_barrier.offset = 0; buf_barrier.size = VK_WHOLE_SIZE; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr); m_errorMonitor->VerifyFound(); // Attempt barrier where dsAccessMask is not supported by dstStageMask buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Attempt to mismatch barriers/waitEvents calls with incompatible queues
    // Create command pool with incompatible queueflags
    const std::vector<VkQueueFamilyProperties> queue_props = m_device->queue_props;
    uint32_t queue_family_index = m_device->QueueFamilyMatching(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
    if (queue_family_index == UINT32_MAX) {
        printf("%s No non-compute queue supporting graphics found; skipped.\n", kSkipPrefix);
        return;  // NOTE: this exits the test function!
    }
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01183");

    VkCommandPoolObj command_pool(m_device, queue_family_index, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj bad_command_buffer(m_device, &command_pool);

    bad_command_buffer.begin();
    buf_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    // Set two bits that should both be supported as a bonus positive check
    buf_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
    vkCmdPipelineBarrier(bad_command_buffer.handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
    m_errorMonitor->VerifyFound();

    // Check for the error of trying to wait on a pipeline stage not supported by this queue. Specifically, since our queue
    // is not a compute queue, vkCmdWaitEvents cannot have its source stage mask be VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-01164");
    VkEvent event;
    VkEventCreateInfo event_create_info{};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
    vkCmdWaitEvents(bad_command_buffer.handle(), 1, &event, /*source stage mask*/ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
    m_errorMonitor->VerifyFound();

    bad_command_buffer.end();
    vkDestroyEvent(m_device->device(), event, nullptr);
}

// Helpers for the tests below
static void ValidOwnershipTransferOp(ErrorMonitor *monitor, VkCommandBufferObj *cb, VkPipelineStageFlags src_stages,
                                     VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier,
                                     const VkImageMemoryBarrier *img_barrier) {
    monitor->ExpectSuccess();
    cb->begin();
    uint32_t num_buf_barrier = (buf_barrier) ? 1 : 0;
    uint32_t num_img_barrier = (img_barrier) ?
1 : 0;
    cb->PipelineBarrier(src_stages, dst_stages, 0, 0, nullptr, num_buf_barrier, buf_barrier, num_img_barrier, img_barrier);
    cb->end();
    cb->QueueCommandBuffer();  // Implicitly waits
    monitor->VerifyNotFound();
}

static void ValidOwnershipTransfer(ErrorMonitor *monitor, VkCommandBufferObj *cb_from, VkCommandBufferObj *cb_to,
                                   VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages,
                                   const VkBufferMemoryBarrier *buf_barrier, const VkImageMemoryBarrier *img_barrier) {
    ValidOwnershipTransferOp(monitor, cb_from, src_stages, dst_stages, buf_barrier, img_barrier);
    ValidOwnershipTransferOp(monitor, cb_to, src_stages, dst_stages, buf_barrier, img_barrier);
}

TEST_F(VkPositiveLayerTest, OwnershipTransfersImage) {
    TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
    if (no_gfx == UINT32_MAX) {
        printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
        return;
    }
    VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();

    VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);

    // Create an "exclusive" image owned by the graphics queue.
    VkImageObj image(m_device);
    VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
    image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1);
    auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres);

    image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
    image_barrier.dstQueueFamilyIndex = no_gfx;
    ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                           VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier);

    // Change layouts while changing ownership
    image_barrier.srcQueueFamilyIndex = no_gfx;
    image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
    image_barrier.oldLayout = image.Layout();
    // Make sure the new layout is different from the old
    if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
        image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    } else {
        image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    }
    ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
                           VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier);
}

TEST_F(VkPositiveLayerTest, OwnershipTransfersBuffer) {
    TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
    if (no_gfx == UINT32_MAX) {
        printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
        return;
    }
    VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();

    VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
    VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY,
no_gfx_queue); // Create a buffer const VkDeviceSize buffer_size = 256; uint8_t data[buffer_size] = {0xFF}; VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); ASSERT_TRUE(buffer.initialized()); auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE); // Let gfx own it. buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_; buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr); // Transfer it to non-gfx buffer_barrier.dstQueueFamilyIndex = no_gfx; ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr); // Transfer it to gfx buffer_barrier.srcQueueFamilyIndex = no_gfx; buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_; ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr); } class BarrierQueueFamilyTestHelper { public: struct QueueFamilyObjs { uint32_t index; // We would use std::unique_ptr, but this triggers a compiler error on older compilers VkQueueObj *queue = nullptr; VkCommandPoolObj *command_pool = nullptr; VkCommandBufferObj *command_buffer = nullptr; VkCommandBufferObj *command_buffer2 = nullptr; ~QueueFamilyObjs() { delete command_buffer2; delete command_buffer; delete command_pool; delete queue; } void Init(VkDeviceObj *device, uint32_t qf_index, VkQueue qf_queue, VkCommandPoolCreateFlags cp_flags) { index = qf_index; queue = new VkQueueObj(qf_queue, qf_index); command_pool = new VkCommandPoolObj(device, qf_index, cp_flags); command_buffer = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue); command_buffer2 = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue); }; }; struct Context { VkLayerTest *layer_test; uint32_t default_index; std::unordered_map<uint32_t, QueueFamilyObjs> queue_families; Context(VkLayerTest *test, const std::vector<uint32_t> &queue_family_indices) : layer_test(test) { if (0 == queue_family_indices.size()) { return; // This is invalid } VkDeviceObj *device_obj = layer_test->DeviceObj(); queue_families.reserve(queue_family_indices.size()); default_index = queue_family_indices[0]; for (auto qfi : queue_family_indices) { VkQueue queue = device_obj->queue_family_queues(qfi)[0]->handle(); queue_families.emplace(std::make_pair(qfi, QueueFamilyObjs())); queue_families[qfi].Init(device_obj, qfi, queue, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); } Reset(); } void Reset() { layer_test->DeviceObj()->wait(); for (auto &qf : queue_families) { vkResetCommandPool(layer_test->device(), qf.second.command_pool->handle(), 0); } } }; BarrierQueueFamilyTestHelper(Context *context) : context_(context), image_(context->layer_test->DeviceObj()) {} // Init with queue families non-null for CONCURRENT sharing mode (which requires them) void Init(std::vector<uint32_t> *families) { VkDeviceObj *device_obj = context_->layer_test->DeviceObj(); image_.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0, families); ASSERT_TRUE(image_.initialized()); image_barrier_ = image_.image_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, image_.Layout(), 
image_.Layout(), image_.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1));

        VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        buffer_.init_as_src_and_dst(*device_obj, 256, mem_prop, families);
        ASSERT_TRUE(buffer_.initialized());
        buffer_barrier_ = buffer_.buffer_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0, VK_WHOLE_SIZE);
    }

    QueueFamilyObjs *GetQueueFamilyInfo(Context *context, uint32_t qfi) {
        QueueFamilyObjs *qf;

        auto qf_it = context->queue_families.find(qfi);
        if (qf_it != context->queue_families.end()) {
            qf = &(qf_it->second);
        } else {
            qf = &(context->queue_families[context->default_index]);
        }
        return qf;
    }

    enum Modifier {
        NONE,
        DOUBLE_RECORD,
        DOUBLE_COMMAND_BUFFER,
    };

    void operator()(std::string img_err, std::string buf_err, uint32_t src, uint32_t dst, bool positive = false,
                    uint32_t queue_family_index = kInvalidQueueFamily, Modifier mod = Modifier::NONE) {
        auto monitor = context_->layer_test->Monitor();
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, img_err);
        monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, buf_err);

        image_barrier_.srcQueueFamilyIndex = src;
        image_barrier_.dstQueueFamilyIndex = dst;
        buffer_barrier_.srcQueueFamilyIndex = src;
        buffer_barrier_.dstQueueFamilyIndex = dst;

        QueueFamilyObjs *qf = GetQueueFamilyInfo(context_, queue_family_index);
        VkCommandBufferObj *command_buffer = qf->command_buffer;
        for (int cb_repeat = 0; cb_repeat < (mod == Modifier::DOUBLE_COMMAND_BUFFER ? 2 : 1); cb_repeat++) {
            command_buffer->begin();
            for (int repeat = 0; repeat < (mod == Modifier::DOUBLE_RECORD ? 2 : 1); repeat++) {
                vkCmdPipelineBarrier(command_buffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
                                     VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buffer_barrier_, 1, &image_barrier_);
            }
            command_buffer->end();
            command_buffer = qf->command_buffer2;  // Second pass (if any) goes to the second command buffer.
        }

        if (queue_family_index != kInvalidQueueFamily) {
            if (mod == Modifier::DOUBLE_COMMAND_BUFFER) {
                // the Fence resolves to VK_NULL_HANDLE... i.e. no fence
                qf->queue->submit({{qf->command_buffer, qf->command_buffer2}}, vk_testing::Fence(), positive);
            } else {
                qf->command_buffer->QueueCommandBuffer(positive);  // Check for success on positive tests only
            }
        }

        if (positive) {
            monitor->VerifyNotFound();
        } else {
            monitor->VerifyFound();
        }
        context_->Reset();
    };

   protected:
    static const uint32_t kInvalidQueueFamily = UINT32_MAX;
    Context *context_;
    VkImageObj image_;
    VkImageMemoryBarrier image_barrier_;
    VkBufferObj buffer_;
    VkBufferMemoryBarrier buffer_barrier_;
};

TEST_F(VkLayerTest, InvalidBarrierQueueFamily) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families");
    ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
0 : 1;
    const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);

    std::vector<uint32_t> qf_indices{{submit_family, other_family}};
    if (only_one_family) {
        qf_indices.resize(1);
    }
    BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);

    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        printf(
            "%s Device has apiVersion greater than 1.0 -- skipping test cases that require external memory "
            "to be "
            "disabled.\n",
            kSkipPrefix);
    } else {
        if (only_one_family) {
            printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
        } else {
            std::vector<uint32_t> families = {submit_family, other_family};
            BarrierQueueFamilyTestHelper conc_test(&test_context);
            conc_test.Init(&families);
            // core_validation::barrier_queue_families::kSrcAndDestMustBeIgnore
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
                      submit_family);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
                      VK_QUEUE_FAMILY_IGNORED);
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
                      submit_family);
            // true -> positive test
            conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
                      VK_QUEUE_FAMILY_IGNORED, true);
        }

        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);  // no queue families means *exclusive* sharing mode.

        // core_validation::barrier_queue_families::kBothIgnoreOrBothValid
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
                  submit_family);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family,
                  VK_QUEUE_FAMILY_IGNORED);
        // true -> positive test
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family, submit_family,
                  true);
        excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
                  VK_QUEUE_FAMILY_IGNORED, true);
    }

    if (only_one_family) {
        printf("%s Single queue family found -- VK_SHARING_MODE_EXCLUSIVE submit testcases skipped.\n", kSkipPrefix);
    } else {
        BarrierQueueFamilyTestHelper excl_test(&test_context);
        excl_test.Init(nullptr);

        // core_validation::barrier_queue_families::kSubmitQueueMustMatchSrcOrDst
        excl_test("VUID-VkImageMemoryBarrier-image-01205", "VUID-VkBufferMemoryBarrier-buffer-01196", other_family, other_family,
                  false, submit_family);

        // true -> positive test (testing both the index logic and the QFO transfer tracking)
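        // A queue family ownership transfer is a two-part handshake: a release barrier submitted on the
        // source family and a matching acquire barrier submitted on the destination family. The layers
        // track pending transfers per resource across submissions, which is what the duplicate-release
        // and missing-release cases below exercise.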
excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, other_family);
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, submit_family);

        // negative testing for QFO transfer tracking
        // Duplicate release in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
                  other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // Duplicate pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00003", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003", submit_family,
                  other_family, false, submit_family);
        // Duplicate acquire in one CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
                  other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
        // No pending release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00004", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004", submit_family,
                  other_family, false, other_family);
        // Duplicate release in two CB
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
                  other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
        // Duplicate acquire in two CB
        excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);  // need a successful release
        excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
                  other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
    }
}

TEST_F(VkLayerTest, InvalidBarrierQueueFamilyWithMemExt) {
    TEST_DESCRIPTION("Create and submit barriers with invalid queue families when memory extension is enabled");

    std::vector<const char *> reqd_instance_extensions = {
        {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
    for (auto extension_name : reqd_instance_extensions) {
        if (InstanceExtensionSupported(extension_name)) {
            m_instance_extension_names.push_back(extension_name);
        } else {
            printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
            return;
        }
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    // Check for external memory device extensions
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
    } else {
        printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    // Find queues of two families
    const uint32_t submit_family = m_device->graphics_queue_node_index_;
    const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
    const uint32_t other_family = submit_family != 0 ?
0 : 1; const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0); std::vector<uint32_t> qf_indices{{submit_family, other_family}}; if (only_one_family) { qf_indices.resize(1); } BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices); if (only_one_family) { printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix); } else { std::vector<uint32_t> families = {submit_family, other_family}; BarrierQueueFamilyTestHelper conc_test(&test_context); // core_validation::barrier_queue_families::kSrcOrDstMustBeIgnore conc_test.Init(&families); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", submit_family, submit_family); // true -> positive test conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, true); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL_KHR, true); conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_EXTERNAL_KHR, VK_QUEUE_FAMILY_IGNORED, true); // core_validation::barrier_queue_families::kSpecialOrIgnoreOnly conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", submit_family, VK_QUEUE_FAMILY_IGNORED); conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED, submit_family); // This is to flag the errors that would be considered only "unexpected" in the parallel case above // true -> positive test conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_EXTERNAL_KHR, true); conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_EXTERNAL_KHR, VK_QUEUE_FAMILY_IGNORED, true); } BarrierQueueFamilyTestHelper excl_test(&test_context); excl_test.Init(nullptr); // no queue families means *exclusive* sharing mode. 
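    // With the external memory extension enabled, VK_QUEUE_FAMILY_EXTERNAL_KHR becomes a legal "special"
    // index for EXCLUSIVE resources, so the checks below differ from the core-1.0-only cases in the
    // previous test.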
// core_validation::barrier_queue_families::kSrcIgnoreRequiresDstIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              submit_family);
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_EXTERNAL_KHR);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_IGNORED, true);

    // core_validation::barrier_queue_families::kDstValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, invalid);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, submit_family,
              true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
              VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
              VK_QUEUE_FAMILY_EXTERNAL_KHR, true);

    // core_validation::barrier_queue_families::kSrcValidOrSpecialIfNotIgnore
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", invalid, submit_family);
    // true -> positive test
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", submit_family, submit_family,
              true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_IGNORED,
              VK_QUEUE_FAMILY_IGNORED, true);
    excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_EXTERNAL_KHR,
              submit_family, true);
}

TEST_F(VkLayerTest, ImageBarrierWithBadRange) {
    TEST_DESCRIPTION("VkImageMemoryBarrier with an invalid subresourceRange");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.create_info().arrayLayers == 1);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier img_barrier_template = {};
    img_barrier_template.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier_template.pNext = NULL;
    img_barrier_template.srcAccessMask = 0;
    img_barrier_template.dstAccessMask = 0;
    img_barrier_template.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier_template.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    img_barrier_template.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier_template.image = image.handle();
    // subresourceRange will be set later for the purposes of this test
    img_barrier_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    img_barrier_template.subresourceRange.baseArrayLayer = 0;
    img_barrier_template.subresourceRange.baseMipLevel = 0;
    img_barrier_template.subresourceRange.layerCount = 0;
    img_barrier_template.subresourceRange.levelCount = 0;

    m_commandBuffer->begin();

    // Nested scope here confuses clang-format, somehow
    // clang-format off

    // try for vkCmdPipelineBarrier
    {
        // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT,
1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = 
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } } // try for vkCmdWaitEvents { VkEvent event; VkEventCreateInfo eci{VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, NULL, 0}; VkResult err = vkCreateEvent(m_device->handle(), &eci, nullptr, &event); ASSERT_VK_SUCCESS(err); // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try levelCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseMipLevel + levelCount > image.mipLevels { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS { 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try layerCount = 0 { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } // Try baseArrayLayer + layerCount > image.arrayLayers { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2}; VkImageMemoryBarrier img_barrier = img_barrier_template; img_barrier.subresourceRange = range; vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier); m_errorMonitor->VerifyFound(); } vkDestroyEvent(m_device->handle(), event, nullptr); } // clang-format on } TEST_F(VkLayerTest, ValidationCacheTestBadMerge) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Load extension functions auto fpCreateValidationCache = (PFN_vkCreateValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT"); auto fpDestroyValidationCache = (PFN_vkDestroyValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT"); auto fpMergeValidationCaches = (PFN_vkMergeValidationCachesEXT)vkGetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT"); if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) { printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME); return; } VkValidationCacheCreateInfoEXT validationCacheCreateInfo; 
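    // Zero initialDataSize and a null pInitialData create an empty cache; the merge below then uses the
    // same handle as both source and destination, which VUID-vkMergeValidationCachesEXT-dstCache-01536 forbids.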
validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
    validationCacheCreateInfo.pNext = NULL;
    validationCacheCreateInfo.initialDataSize = 0;
    validationCacheCreateInfo.pInitialData = NULL;
    validationCacheCreateInfo.flags = 0;
    VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
    VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
    ASSERT_VK_SUCCESS(res);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
    res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
    m_errorMonitor->VerifyFound();

    fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}

TEST_F(VkPositiveLayerTest, LayoutFromPresentWithoutAccessMemoryRead) {
    // Transition an image away from PRESENT_SRC_KHR without ACCESS_MEMORY_READ
    // in srcAccessMask.
    // The required behavior here was a bit unclear in earlier versions of the
    // spec, but there is no memory dependency required here, so this should
    // work without warnings.
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
               VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier barrier = {};
    VkImageSubresourceRange range;
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
    barrier.dstAccessMask = 0;
    barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.image = image.handle();
    range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    range.baseMipLevel = 0;
    range.levelCount = 1;
    range.baseArrayLayer = 0;
    range.layerCount = 1;
    barrier.subresourceRange = range;
    VkCommandBufferObj cmdbuf(m_device, m_commandPool);
    cmdbuf.begin();
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                           &barrier);
    barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
    barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    barrier.srcAccessMask = 0;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
                           &barrier);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, IdxBufferAlignmentError) {
    // Bind an index buffer at an offset that is not a multiple of the index type size
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    uint32_t const indices[] = {0};
    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.size = 1024;
    buf_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
    buf_info.queueFamilyIndexCount = 1;
    buf_info.pQueueFamilyIndices = indices;

    VkBuffer buffer;
    VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements requirements;
    vkGetBufferMemoryRequirements(m_device->device(), buffer, &requirements);

    VkMemoryAllocateInfo alloc_info{};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    alloc_info.allocationSize = requirements.size;
    bool pass = m_device->phy().set_memory_type(requirements.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    ASSERT_TRUE(pass);

    VkDeviceMemory memory;
    err =
vkAllocateMemory(m_device->device(), &alloc_info, NULL, &memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, memory, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); ASSERT_VK_SUCCESS(err); // vkCmdBindPipeline(m_commandBuffer->handle(), // VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // Should error before calling to driver so don't care about actual data m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBindIndexBuffer() offset (0x7) does not fall on "); vkCmdBindIndexBuffer(m_commandBuffer->handle(), buffer, 7, VK_INDEX_TYPE_UINT16); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), memory, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, InvalidQueueFamilyIndex) { // Create an out-of-range queueFamilyIndex ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkBufferCreateInfo buffCI = {}; buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffCI.size = 1024; buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buffCI.queueFamilyIndexCount = 2; // Introduce failure by specifying invalid queue_family_index uint32_t qfi[2]; qfi[0] = 777; qfi[1] = 0; buffCI.pQueueFamilyIndices = qfi; buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode VkBuffer ib; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCreateBuffer: pCreateInfo->pQueueFamilyIndices[0] (= 777) is not one of the queue " "families given via VkDeviceQueueCreateInfo structures when the device was created."); vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib); m_errorMonitor->VerifyFound(); if (m_device->queue_props.size() > 2) { VkBuffer ib2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which was not created allowing concurrent"); // Create buffer shared to queue families 1 and 2, but submitted on queue family 0 buffCI.queueFamilyIndexCount = 2; qfi[0] = 1; qfi[1] = 2; vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib2); VkDeviceMemory mem; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), ib2, &mem_reqs); VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; bool pass = false; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate required memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), ib2, NULL); return; } vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem); vkBindBufferMemory(m_device->device(), ib2, mem, 0); m_commandBuffer->begin(); vkCmdFillBuffer(m_commandBuffer->handle(), ib2, 0, 16, 5); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyFound(); vkDestroyBuffer(m_device->device(), ib2, NULL); vkFreeMemory(m_device->device(), mem, NULL); } } TEST_F(VkLayerTest, ExecuteCommandsPrimaryCB) { TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a primary command buffer (should only be secondary)"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // An empty primary command buffer VkCommandBufferObj cb(m_device, m_commandPool); cb.begin(); cb.end(); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &renderPassBeginInfo(), VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); VkCommandBuffer handle = cb.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"vkCmdExecuteCommands() called w/ Primary Cmd Buffer "); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &handle); m_errorMonitor->VerifyFound(); m_errorMonitor->SetUnexpectedError("All elements of pCommandBuffers must not be in the pending state"); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, DSUsageBitsErrors) { TEST_DESCRIPTION("Attempt to update descriptor sets for images and buffers that do not have correct usage bits sets."); ASSERT_NO_FATAL_FAILURE(Init()); std::array<VkDescriptorPoolSize, VK_DESCRIPTOR_TYPE_RANGE_SIZE> ds_type_count; for (uint32_t i = 0; i < ds_type_count.size(); ++i) { ds_type_count[i].type = VkDescriptorType(i); ds_type_count[i].descriptorCount = 1; } vk_testing::DescriptorPool ds_pool; ds_pool.init(*m_device, vk_testing::DescriptorPool::create_info(0, VK_DESCRIPTOR_TYPE_RANGE_SIZE, ds_type_count)); ASSERT_TRUE(ds_pool.initialized()); std::vector<VkDescriptorSetLayoutBinding> dsl_bindings(1); dsl_bindings[0].binding = 0; dsl_bindings[0].descriptorType = VkDescriptorType(0); dsl_bindings[0].descriptorCount = 1; dsl_bindings[0].stageFlags = VK_SHADER_STAGE_ALL; dsl_bindings[0].pImmutableSamplers = NULL; // Create arrays of layout and descriptor objects using UpDescriptorSet = std::unique_ptr<vk_testing::DescriptorSet>; std::vector<UpDescriptorSet> descriptor_sets; using UpDescriptorSetLayout = std::unique_ptr<VkDescriptorSetLayoutObj>; std::vector<UpDescriptorSetLayout> ds_layouts; descriptor_sets.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); ds_layouts.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE); for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) { dsl_bindings[0].descriptorType = VkDescriptorType(i); ds_layouts.push_back(UpDescriptorSetLayout(new VkDescriptorSetLayoutObj(m_device, dsl_bindings))); descriptor_sets.push_back(UpDescriptorSet(ds_pool.alloc_sets(*m_device, *ds_layouts.back()))); ASSERT_TRUE(descriptor_sets.back()->initialized()); } // Create a buffer & bufferView to be used for invalid updates const VkDeviceSize buffer_size = 256; uint8_t data[buffer_size]; VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT); VkConstantBufferObj storage_texel_buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT); ASSERT_TRUE(buffer.initialized() && storage_texel_buffer.initialized()); auto buff_view_ci = vk_testing::BufferView::createInfo(buffer.handle(), VK_FORMAT_R8_UNORM); vk_testing::BufferView buffer_view_obj, storage_texel_buffer_view_obj; buffer_view_obj.init(*m_device, buff_view_ci); buff_view_ci.buffer = storage_texel_buffer.handle(); storage_texel_buffer_view_obj.init(*m_device, buff_view_ci); ASSERT_TRUE(buffer_view_obj.initialized() && storage_texel_buffer_view_obj.initialized()); VkBufferView buffer_view = buffer_view_obj.handle(); VkBufferView storage_texel_buffer_view = storage_texel_buffer_view_obj.handle(); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.InitNoLayout(64, 64, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_obj.initialized()); VkImageView image_view = image_obj.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer.handle(); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; 
// Point the write at all three resource infos up front; vkUpdateDescriptorSets reads only the member
    // matching descriptorType, so the unused pointers are ignored.
    descriptor_write.pTexelBufferView = &buffer_view;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = &img_info;

    // These error messages align with the VkDescriptorType enum
    std::string error_codes[] = {
        "VUID-VkWriteDescriptorSet-descriptorType-00326",  // placeholder, no error for SAMPLER descriptor
        "VUID-VkWriteDescriptorSet-descriptorType-00326",  // COMBINED_IMAGE_SAMPLER
        "VUID-VkWriteDescriptorSet-descriptorType-00326",  // SAMPLED_IMAGE
        "VUID-VkWriteDescriptorSet-descriptorType-00326",  // STORAGE_IMAGE
        "VUID-VkWriteDescriptorSet-descriptorType-00334",  // UNIFORM_TEXEL_BUFFER
        "VUID-VkWriteDescriptorSet-descriptorType-00335",  // STORAGE_TEXEL_BUFFER
        "VUID-VkWriteDescriptorSet-descriptorType-00330",  // UNIFORM_BUFFER
        "VUID-VkWriteDescriptorSet-descriptorType-00331",  // STORAGE_BUFFER
        "VUID-VkWriteDescriptorSet-descriptorType-00330",  // UNIFORM_BUFFER_DYNAMIC
        "VUID-VkWriteDescriptorSet-descriptorType-00331",  // STORAGE_BUFFER_DYNAMIC
        "VUID-VkWriteDescriptorSet-descriptorType-00326"   // INPUT_ATTACHMENT
    };
    // Start loop at 1 as SAMPLER desc type has no usage bit error
    for (uint32_t i = 1; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) {
        if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            // Now check for UNIFORM_TEXEL_BUFFER using storage_texel_buffer_view
            descriptor_write.pTexelBufferView = &storage_texel_buffer_view;
        }
        descriptor_write.descriptorType = VkDescriptorType(i);
        descriptor_write.dstSet = descriptor_sets[i]->handle();
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_codes[i]);

        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

        m_errorMonitor->VerifyFound();
        if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            descriptor_write.pTexelBufferView = &buffer_view;
        }
    }
}

TEST_F(VkLayerTest, DSBufferInfoErrors) {
    TEST_DESCRIPTION(
        "Attempt to update buffer descriptor set that has incorrect parameters in VkDescriptorBufferInfo struct. This includes:\n"
        "1. offset value greater than or equal to buffer size\n"
        "2. range value of 0\n"
        "3.
range value greater than buffer (size - offset)"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for invalid updates VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = mem_reqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; // Cause error due to offset out of range buff_info.offset = buff_ci.size; buff_info.range = VK_WHOLE_SIZE; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = ds.set_; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-offset-00340"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Now cause error due to range of 0 buff_info.offset = 0; buff_info.range = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-range-00341"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); // Now cause error due to range exceeding buffer size - offset buff_info.offset = 0; buff_info.range = buff_ci.size + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-range-00342"); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, DSBufferLimitErrors) { TEST_DESCRIPTION( "Attempt to update buffer descriptor set that has VkDescriptorBufferInfo values that violate device limits.\n" "Test cases include:\n" "1. range of uniform buffer update exceeds maxUniformBufferRange\n" "2. offset of uniform buffer update is not multiple of minUniformBufferOffsetAlignment\n" "3. range of storage buffer update exceeds maxStorageBufferRange\n" "4. 
offset of storage buffer update is not multiple of minStorageBufferOffsetAlignment"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); struct TestCase { VkDescriptorType descriptor_type; VkBufferUsageFlagBits buffer_usage; VkDeviceSize max_range; std::string max_range_vu; VkDeviceSize min_align; std::string min_align_vu; }; for (const auto &test_case : { TestCase({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, m_device->props.limits.maxUniformBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00332", m_device->props.limits.minUniformBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00327"}), TestCase({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, m_device->props.limits.maxStorageBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00333", m_device->props.limits.minStorageBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00328"}), }) { // Create layout with single buffer OneOffDescriptorSet ds(m_device, { {0, test_case.descriptor_type, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for invalid updates VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.usage = test_case.buffer_usage; bci.size = test_case.max_range + test_case.min_align; // Make buffer bigger than range limit bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &bci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = mem_reqs.size; bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); if (VK_SUCCESS != err) { printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); continue; } err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = test_case.descriptor_type; descriptor_write.dstSet = ds.set_; // Exceed range limit if (test_case.max_range != UINT32_MAX) { buff_info.range = test_case.max_range + 1; buff_info.offset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } // Reduce size of range to acceptable limit and cause offset error if (test_case.min_align > 1) { buff_info.range = test_case.max_range; buff_info.offset = test_case.min_align - 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.min_align_vu); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); 
m_errorMonitor->VerifyFound(); } // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } } TEST_F(VkLayerTest, DSAspectBitsErrors) { // TODO : Initially only catching case where DEPTH & STENCIL aspect bits // are set, but could expand this test to hit more cases. TEST_DESCRIPTION("Attempt to update descriptor sets for images that do not have correct aspect bits sets."); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create an image to be used for invalid updates VkImageObj image_obj(m_device); image_obj.Init(64, 64, 1, depth_format, VK_IMAGE_USAGE_SAMPLED_BIT); ASSERT_TRUE(image_obj.initialized()); VkImage image = image_obj.image(); // Now create view for image VkImageViewCreateInfo image_view_ci = {}; image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; image_view_ci.image = image; image_view_ci.format = depth_format; image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D; image_view_ci.subresourceRange.layerCount = 1; image_view_ci.subresourceRange.baseArrayLayer = 0; image_view_ci.subresourceRange.levelCount = 1; // Setting both depth & stencil aspect bits is illegal for an imageView used // to populate a descriptor set. image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; VkImageView image_view; err = vkCreateImageView(m_device->device(), &image_view_ci, NULL, &image_view); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo img_info = {}; img_info.imageView = image_view; VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = NULL; descriptor_write.pBufferInfo = NULL; descriptor_write.pImageInfo = &img_info; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; descriptor_write.dstSet = ds.set_; // TODO(whenning42): Update this check to look for a VUID when this error is // assigned one. 
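    // A descriptor's image view must select exactly one aspect; a combined DEPTH|STENCIL view is legal as
    // a framebuffer attachment but not as a sampled or input-attachment descriptor.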
const char *error_msg = " please only set either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT "; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_msg); vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), image_view, NULL); } TEST_F(VkLayerTest, DSTypeMismatch) { // Create DS w/ layout of one type and attempt Update w/ mis-matched type VkResult err; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #0 with type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER but update type is VK_DESCRIPTOR_TYPE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.descriptorCount = 1; // This is a mismatched type for the layout which expects BUFFER descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, DSUpdateOutOfBounds) { // For overlapping Update, have arrayIndex exceed that of layout m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); if (!buffer_test.GetBufferCurrent()) { // Something prevented creation of buffer so abort printf("%s Buffer creation failed, skipping test\n", kSkipPrefix); return; } // Correctly update descriptor to avoid "NOT_UPDATED" error VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer_test.GetBuffer(); buff_info.offset = 0; buff_info.range = 1024; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstArrayElement = 1; /* This index out of bounds for the update */ descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.pBufferInfo = &buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidDSUpdateIndex) { // Create layout w/ count of 1 and attempt update to that layout w/ binding index 2 VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00315"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; 
info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    // This is the wrong type, but out of bounds will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, DSUpdateEmptyBinding) {
    // Create layout w/ empty binding and attempt to update it
    VkResult err;

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 0 /* !! */, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;  // Lie here to avoid parameter_validation error
    // This is the wrong type, but empty binding error will be flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00316");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, InvalidDSUpdateStruct) {
    // Call UpdateDS w/ a struct type other than the valid VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
    VkResult err;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, ".sType must be VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo info = {};
    info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write;
    memset(&descriptor_write, 0, sizeof(descriptor_write));
    descriptor_write.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; /* Intentionally broken struct type */
    descriptor_write.dstSet = ds.set_;
    descriptor_write.descriptorCount = 1;
    // The descriptor type doesn't matter here; the invalid sType is flagged first
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    descriptor_write.pImageInfo = &info;

    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_errorMonitor->VerifyFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
}

TEST_F(VkLayerTest, SampleDescriptorUpdateError) {
    // Create a single Sampler descriptor and send it an invalid Sampler
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00325");

    ASSERT_NO_FATAL_FAILURE(Init());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkSampler sampler = (VkSampler)((size_t)0xbaadbeef); //
Sampler with invalid handle VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ImageViewDescriptorUpdateError) { // Create a single combined Image/Sampler descriptor and send it an invalid // imageView VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00326"); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkImageView view = (VkImageView)((size_t)0xbaadbeef); // invalid imageView object VkDescriptorImageInfo descriptor_info; memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo)); descriptor_info.sampler = sampler; descriptor_info.imageView = view; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_write.pImageInfo = &descriptor_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkLayerTest, CopyDescriptorUpdateErrors) { // Create DS w/ layout of 2 types, write update 1 and attempt to copy-update // into the other VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding #1 with type VK_DESCRIPTOR_TYPE_SAMPLER. 
Types do not match."); ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); VkSampler sampler; err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo info = {}; info.sampler = sampler; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(VkWriteDescriptorSet)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 1; // SAMPLER binding from layout above descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_write.pImageInfo = &info; // This write update should succeed vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); // Now perform a copy update that fails due to type mismatch VkCopyDescriptorSet copy_ds_update; memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; // Copy from SAMPLER binding copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; // ERROR : copy to UNIFORM binding copy_ds_update.descriptorCount = 1; // copy 1 descriptor vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " does not have copy update src binding of 3."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 3; // ERROR : Invalid binding for matching layout copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 1; // Copy 1 descriptor vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); // Now perform a copy update that fails due to binding out of bounds m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " binding#1 with offset index of 1 plus update array offset of 0 and update of 5 " "descriptors oversteps total number of descriptors in set: 2."); memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet)); copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update.srcSet = ds.set_; copy_ds_update.srcBinding = 1; copy_ds_update.dstSet = ds.set_; copy_ds_update.dstBinding = 0; copy_ds_update.descriptorCount = 5; // ERROR copy 5 descriptors (out of bounds for layout) vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update); m_errorMonitor->VerifyFound(); vkDestroySampler(m_device->device(), sampler, NULL); } TEST_F(VkPositiveLayerTest, CopyNonupdatedDescriptors) { TEST_DESCRIPTION("Copy non-updated descriptors"); unsigned int i; ASSERT_NO_FATAL_FAILURE(Init()); OneOffDescriptorSet src_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}, {2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); OneOffDescriptorSet dst_ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, 
VK_SHADER_STAGE_ALL, nullptr}, }); m_errorMonitor->ExpectSuccess(); const unsigned int copy_size = 2; VkCopyDescriptorSet copy_ds_update[copy_size]; memset(copy_ds_update, 0, sizeof(copy_ds_update)); for (i = 0; i < copy_size; i++) { copy_ds_update[i].sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET; copy_ds_update[i].srcSet = src_ds.set_; copy_ds_update[i].srcBinding = i; copy_ds_update[i].dstSet = dst_ds.set_; copy_ds_update[i].dstBinding = i; copy_ds_update[i].descriptorCount = 1; } vkUpdateDescriptorSets(m_device->device(), 0, NULL, copy_size, copy_ds_update); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, NumSamplesMismatch) { // Create CommandBuffer where MSAA samples doesn't match RenderPass // sampleCount m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Num samples mismatch! "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RenderPassIncompatible) { TEST_DESCRIPTION( "Hit RenderPass incompatible cases. 
Initial case is drawing with an active renderpass that's not compatible with the bound " "pipeline state object's creation renderpass"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices // Create a renderpass that will be incompatible with default renderpass VkAttachmentReference color_att = {}; color_att.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_att; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; // Format incompatible with PSO RP color attach format B8G8R8A8_UNORM attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {{0, 0}, {64, 64}}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); pipe.CreateVKPipeline(pipeline_layout.handle(), rp); VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = rp; cbii.subpass = 0; VkCommandBufferBeginInfo cbbi = {}; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-renderPass-00435"); // Render triangle (the error should trigger on the attempt to draw). m_commandBuffer->Draw(3, 1, 0, 0); // Finalize recording of the command buffer m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkLayerTest, NumBlendAttachMismatch) { // Create Pipeline where the number of blend attachments doesn't match the // number of color attachments. In this case, we don't add any color // blend attachments even though we have a color attachment. 
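    // VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746 requires pColorBlendState->attachmentCount
    // to equal the colorAttachmentCount of the subpass this pipeline is created against, so building the
    // pipeline with zero blend attachments should fail.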
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader // but add it to be able to run on more devices VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, Maint1BindingSliceOf3DImage) { TEST_DESCRIPTION( "Attempt to bind a slice of a 3D texture in a descriptor set. This is explicitly disallowed by KHR_maintenance1 to keep " "things simple for drivers."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkResult err; OneOffDescriptorSet set(m_device, { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, nullptr, VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR, VK_IMAGE_TYPE_3D, VK_FORMAT_R8G8B8A8_UNORM, {32, 32, 32}, 1, 1, VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_SAMPLED_BIT, VK_SHARING_MODE_EXCLUSIVE, 0, nullptr, VK_IMAGE_LAYOUT_UNDEFINED}; VkImageObj image(m_device); image.init(&ici); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); // Meat of the test. 
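    // A 2D (or 2D-array) view created from a 3D image must not be written into a descriptor;
    // VUID-VkDescriptorImageInfo-imageView-00343 should fire when the write below is validated.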
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageView-00343"); VkDescriptorImageInfo dii = {VK_NULL_HANDLE, view, VK_IMAGE_LAYOUT_GENERAL}; VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set.set_, 0, 0, 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &dii, nullptr, nullptr}; vkUpdateDescriptorSets(m_device->device(), 1, &write, 0, nullptr); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkLayerTest, MissingClearAttachment) { TEST_DESCRIPTION("Points to a wrong colorAttachment index in a VkClearAttachment structure passed to vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-aspectMask-00015"); VKTriangleTest(BsoFailCmdClearAttachments); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ConfirmNoVLErrorWhenVkCmdClearAttachmentsCalledInSecondaryCB) { TEST_DESCRIPTION( "This test is to verify that when vkCmdClearAttachments is called by a secondary commandbuffer, the validation layers do " "not throw an error if the primary commandbuffer begins a renderpass before executing the secondary commandbuffer."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkCommandBufferBeginInfo info = {}; VkCommandBufferInheritanceInfo hinfo = {}; info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; info.pInheritanceInfo = &hinfo; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.pNext = NULL; hinfo.renderPass = renderPass(); hinfo.subpass = 0; hinfo.framebuffer = m_framebuffer; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; secondary.begin(&info); VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 0.0; color_attachment.clearValue.color.float32[1] = 0.0; color_attachment.clearValue.color.float32[2] = 0.0; color_attachment.clearValue.color.float32[3] = 0.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; vkCmdClearAttachments(secondary.handle(), 1, &color_attachment, 1, &clear_rect); secondary.end(); // Modify clear rect here to verify that it doesn't cause validation error clear_rect = {{{0, 0}, {99999999, 99999999}}, 0, 0}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle()); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CmdClearAttachmentTests) { TEST_DESCRIPTION("Various tests for validating usage of vkCmdClearAttachments"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; pipe_ms_state_ci.sampleShadingEnable = 
0; pipe_ms_state_ci.minSampleShading = 1.0; pipe_ms_state_ci.pSampleMask = NULL; const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); // We shouldn't need a fragment shader but add it to be able to run // on more devices VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&pipe_ms_state_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Main thing we care about for this test is that the VkImage obj we're // clearing matches Color Attachment of FB // Also pass down other dummy params to keep driver and paramchecker happy VkClearAttachment color_attachment; color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; color_attachment.clearValue.color.float32[0] = 1.0; color_attachment.clearValue.color.float32[1] = 1.0; color_attachment.clearValue.color.float32[2] = 1.0; color_attachment.clearValue.color.float32[3] = 1.0; color_attachment.colorAttachment = 0; VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1}; // Call for full-sized FB Color attachment prior to issuing a Draw m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "vkCmdClearAttachments() issued on command buffer object "); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); clear_rect.rect.extent.width = renderPassBeginInfo().renderArea.extent.width + 4; clear_rect.rect.extent.height = clear_rect.rect.extent.height / 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer >= view layers clear_rect.rect.extent.width = (uint32_t)m_width; clear_rect.baseArrayLayer = 1; clear_rect.layerCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); // baseLayer + layerCount > view layers clear_rect.rect.extent.width = (uint32_t)m_width; clear_rect.baseArrayLayer = 0; clear_rect.layerCount = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017"); vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, VtxBufferBadIndex) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "but no vertex buffers are attached to this Pipeline State Object"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {}; pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; pipe_ms_state_ci.pNext = NULL; pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; pipe_ms_state_ci.sampleShadingEnable = 0; 
pipe_ms_state_ci.minSampleShading = 1.0;
    pipe_ms_state_ci.pSampleMask = NULL;

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);  // We shouldn't need a fragment shader
                                                                                            // but add it to be able to run on more devices
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.SetMSAA(&pipe_ms_state_ci);
    pipe.SetViewport(m_viewports);
    pipe.SetScissor(m_scissors);
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    // Don't care about actual data, just need to get to draw to flag error
    static const float vbo_data[3] = {1.f, 0.f, 1.f};
    VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
    m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)0, 1);  // VBO idx 1, but no VBO in PSO
    m_commandBuffer->Draw(1, 0, 0, 0);

    m_errorMonitor->VerifyFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, MismatchCountQueueCreateRequestedFeature) {
    TEST_DESCRIPTION(
        "Use an invalid count in a vkEnumeratePhysicalDevices call. Use an invalid Queue Family Index in vkCreateDevice.");

    ASSERT_NO_FATAL_FAILURE(Init());

    // The following test fails with recent NVIDIA drivers.
    // By the time core_validation is reached, the NVIDIA
    // driver has sanitized the invalid condition and core_validation
    // is not introduced to the failure condition. This is not the case
    // with AMD and Mesa drivers. Further investigation is required.
    // uint32_t count = static_cast<uint32_t>(~0);
    // VkPhysicalDevice physical_device;
    // vkEnumeratePhysicalDevices(instance(), &count, &physical_device);
    // m_errorMonitor->VerifyFound();

    float queue_priority = 0.0;
    VkDeviceQueueCreateInfo queue_create_info = {};
    queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
    queue_create_info.queueCount = 1;
    queue_create_info.pQueuePriorities = &queue_priority;
    queue_create_info.queueFamilyIndex = static_cast<uint32_t>(~0);

    VkPhysicalDeviceFeatures features = m_device->phy().features();
    VkDevice testDevice;
    VkDeviceCreateInfo device_create_info = {};
    device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_create_info.queueCreateInfoCount = 1;
    device_create_info.pQueueCreateInfos = &queue_create_info;
    device_create_info.pEnabledFeatures = &features;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381");
    // The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
    // not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
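    // (SetUnexpectedError merely tolerates the loader's message if it shows up; it does not require it.)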
m_errorMonitor->SetUnexpectedError("Failed to create device chain."); vkCreateDevice(gpu(), &device_create_info, nullptr, &testDevice); m_errorMonitor->VerifyFound(); vk_testing::QueueCreateInfoArray queue_info_obj(m_device->queue_props); device_create_info.queueCreateInfoCount = queue_info_obj.size(); device_create_info.pQueueCreateInfos = queue_info_obj.data(); unsigned feature_count = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32); VkBool32 *feature_array = reinterpret_cast<VkBool32 *>(&features); for (unsigned i = 0; i < feature_count; i++) { if (VK_FALSE == feature_array[i]) { feature_array[i] = VK_TRUE; device_create_info.pEnabledFeatures = &features; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "While calling vkCreateDevice(), requesting feature"); // The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms // that do not use the LunarG loader (e.g. Android) will not see the message and the test will fail. m_errorMonitor->SetUnexpectedError("Failed to create device chain."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You requested features that are unavailable on this device. You should first " "query feature availability by calling vkGetPhysicalDeviceFeatures()."); vkCreateDevice(gpu(), &device_create_info, nullptr, &testDevice); m_errorMonitor->VerifyFound(); break; } } } TEST_F(VkLayerTest, InvalidQueryPoolCreate) { TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device."); ASSERT_NO_FATAL_FAILURE(Init()); vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props); VkDevice local_device; VkDeviceCreateInfo device_create_info = {}; auto features = m_device->phy().features(); // Intentionally disable pipeline stats features.pipelineStatisticsQuery = VK_FALSE; device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; device_create_info.pNext = NULL; device_create_info.queueCreateInfoCount = queue_info.size(); device_create_info.pQueueCreateInfos = queue_info.data(); device_create_info.enabledLayerCount = 0; device_create_info.ppEnabledLayerNames = NULL; device_create_info.pEnabledFeatures = &features; VkResult err = vkCreateDevice(gpu(), &device_create_info, nullptr, &local_device); ASSERT_VK_SUCCESS(err); VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; VkQueryPool query_pool; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkQueryPoolCreateInfo-queryType-00791"); vkCreateQueryPool(local_device, &qpci, nullptr, &query_pool); m_errorMonitor->VerifyFound(); vkDestroyDevice(local_device, nullptr); } TEST_F(VkLayerTest, UnclosedQuery) { TEST_DESCRIPTION("End a command buffer with a query still in progress."); const char *invalid_query = "Ending command buffer with in progress query: queryPool 0x"; ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_query); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info = {}; query_pool_create_info.sType = 
VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
    query_pool_create_info.queryCount = 1;
    vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);

    vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0 /*startQuery*/, 1 /*queryCount*/);
    vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);

    vkEndCommandBuffer(m_commandBuffer->handle());
    m_errorMonitor->VerifyFound();

    vkDestroyQueryPool(m_device->device(), query_pool, nullptr);
    vkDestroyEvent(m_device->device(), event, nullptr);
}

TEST_F(VkLayerTest, VertexBufferInvalid) {
    TEST_DESCRIPTION(
        "Submit a command buffer using deleted vertex buffer, delete a buffer twice, use an invalid offset for each buffer type, "
        "and attempt to bind a null buffer");

    const char *deleted_buffer_in_command_buffer = "Cannot submit cmd buffer using deleted buffer ";
    const char *invalid_offset_message = "vkBindBufferMemory(): memoryOffset is 0x";
    const char *invalid_storage_buffer_offset_message = "vkBindBufferMemory(): storage memoryOffset is 0x";
    const char *invalid_texel_buffer_offset_message = "vkBindBufferMemory(): texel memoryOffset is 0x";
    const char *invalid_uniform_buffer_offset_message = "vkBindBufferMemory(): uniform memoryOffset is 0x";

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
    pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
    pipe_ms_state_ci.pNext = NULL;
    pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    pipe_ms_state_ci.sampleShadingEnable = 0;
    pipe_ms_state_ci.minSampleShading = 1.0;
    pipe_ms_state_ci.pSampleMask = nullptr;

    const VkPipelineLayoutObj pipeline_layout(m_device);

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.SetMSAA(&pipe_ms_state_ci);
    pipe.SetViewport(m_viewports);
    pipe.SetScissor(m_scissors);
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());

    {
        // Create and bind a vertex buffer in a reduced scope, which will cause
        // it to be deleted upon leaving this scope
        const float vbo_data[3] = {1.f, 0.f, 1.f};
        VkVerticesObj draw_vertices(m_device, 1, 1, sizeof(vbo_data[0]), sizeof(vbo_data) / sizeof(vbo_data[0]), vbo_data);
        draw_vertices.BindVertexBuffers(m_commandBuffer->handle());
        draw_vertices.AddVertexInputToPipe(pipe);
    }

    m_commandBuffer->Draw(1, 0, 0, 0);

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, deleted_buffer_in_command_buffer);
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();

    {
        // Create and bind a vertex buffer in a reduced scope, and delete it
        // twice, the second through the destructor
        VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eDoubleDelete);
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBuffer-buffer-parameter");
        buffer_test.TestDoubleDestroy();
    }
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetUnexpectedError("value of pCreateInfo->usage must not be 
0"); if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidMemoryOffset)) { // Create and bind a memory buffer with an invalid offset. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_offset_message); m_errorMonitor->SetUnexpectedError( "If buffer was created with the VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, " "memoryOffset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidMemoryOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) { // Create and bind a memory buffer with an invalid offset again, // but look for a texel buffer message. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_texel_buffer_offset_message); m_errorMonitor->SetUnexpectedError( "memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from " "a call to vkGetBufferMemoryRequirements with buffer"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) { // Create and bind a memory buffer with an invalid offset again, but // look for a uniform buffer message. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_uniform_buffer_offset_message); m_errorMonitor->SetUnexpectedError( "memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from " "a call to vkGetBufferMemoryRequirements with buffer"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { // Create and bind a memory buffer with an invalid offset again, but // look for a storage buffer message. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_storage_buffer_offset_message); m_errorMonitor->SetUnexpectedError( "memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from " "a call to vkGetBufferMemoryRequirements with buffer"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a null buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkBindBufferMemory: required parameter buffer specified as VK_NULL_HANDLE"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindNullBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to bind a fake buffer. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-parameter"); VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindFakeBuffer); (void)buffer_test; m_errorMonitor->VerifyFound(); } { // Attempt to use an invalid handle to delete a buffer. 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeMemory-memory-parameter"); VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eFreeInvalidHandle); (void)buffer_test; } m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, BadVertexBufferOffset) { TEST_DESCRIPTION("Submit an offset past the end of a vertex buffer"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindVertexBuffers-pOffsets-00626"); m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)(3 * sizeof(float)), 1); // Offset at the end of the buffer m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } // INVALID_IMAGE_LAYOUT tests (one other case is hit by MapMemWithoutHostVisibleBit and not here) TEST_F(VkLayerTest, InvalidImageLayout) { TEST_DESCRIPTION( "Hit all possible validation checks associated with the DRAWSTATE_INVALID_IMAGE_LAYOUT enum. Generally these involve " "having images in the wrong layout when they're copied or transitioned."); // 3 in ValidateCmdBufImageLayouts // * -1 Attempt to submit cmd buf w/ deleted image // * -2 Cmd buf submit of image w/ layout not matching first use w/ subresource // * -3 Cmd buf submit of image w/ layout not matching first use w/o subresource ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } // Create src & dst images to use for copy operations VkImage src_image; VkImage dst_image; VkImage depth_image; const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM; const int32_t tex_width = 32; const int32_t tex_height = 32; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = tex_format; image_create_info.extent.width = tex_width; image_create_info.extent.height = tex_height; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 4; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; image_create_info.flags = 0; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &src_image); ASSERT_VK_SUCCESS(err); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dst_image); ASSERT_VK_SUCCESS(err); image_create_info.format = VK_FORMAT_D16_UNORM; image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &depth_image); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryRequirements img_mem_reqs = {}; VkMemoryAllocateInfo mem_alloc = {}; VkDeviceMemory src_image_mem, dst_image_mem, depth_image_mem; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), src_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &src_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dst_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &dst_image_mem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), depth_image, &img_mem_reqs); mem_alloc.allocationSize = img_mem_reqs.size; pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &depth_image_mem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), src_image, src_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dst_image, dst_image_mem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), depth_image, depth_image_mem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.srcOffset.x = 0; copy_region.srcOffset.y = 0; copy_region.srcOffset.z = 0; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.mipLevel = 0; copy_region.dstSubresource.baseArrayLayer = 0; 
copy_region.dstSubresource.layerCount = 1;
    copy_region.dstOffset.x = 0;
    copy_region.dstOffset.y = 0;
    copy_region.dstOffset.z = 0;
    copy_region.extent.width = 1;
    copy_region.extent.height = 1;
    copy_region.extent.depth = 1;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                         "layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
    m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    // The first call hits the expected WARNING and skips the call down the chain, so call a second time to call down chain and
    // update layer state
    m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
    m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    // Now cause error due to src image layout changing
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00128");
    m_errorMonitor->SetUnexpectedError("is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    // Final src error is due to bad layout type
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00129");
    m_errorMonitor->SetUnexpectedError(
        "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    // Now verify same checks for dst
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                         "layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
    m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    // Now cause error due to dst image layout changing
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00133");
    m_errorMonitor->SetUnexpectedError(
        "is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region);
    m_errorMonitor->VerifyFound();
    // Final dst error is due to bad layout type
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00134");
    m_errorMonitor->SetUnexpectedError(
        "with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL.");
    m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region);
    m_errorMonitor->VerifyFound();

    // Convert dst and depth images to TRANSFER_DST for subsequent tests
    VkImageMemoryBarrier transfer_dst_image_barrier[1] = {};
    transfer_dst_image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
transfer_dst_image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; transfer_dst_image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; transfer_dst_image_barrier[0].srcAccessMask = 0; transfer_dst_image_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT; transfer_dst_image_barrier[0].image = dst_image; transfer_dst_image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; transfer_dst_image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); transfer_dst_image_barrier[0].image = depth_image; transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, transfer_dst_image_barrier); // Cause errors due to clearing with invalid image layouts VkClearColorValue color_clear_value = {}; VkImageSubresourceRange clear_range; clear_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; clear_range.baseMipLevel = 0; clear_range.baseArrayLayer = 0; clear_range.layerCount = 1; clear_range.levelCount = 1; // Fail due to explicitly prohibited layout for color clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00005"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_UNDEFINED, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for color clear. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004"); m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_GENERAL, &color_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); VkClearDepthStencilValue depth_clear_value = {}; clear_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Fail due to explicitly prohibited layout for depth clear (only GENERAL and TRANSFER_DST are permitted). // Since the image is currently not in UNDEFINED layout, this will emit two errors. m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_UNDEFINED, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Fail due to provided layout not matching actual current layout for depth clear. 
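    // GENERAL is an allowed layout for a depth/stencil clear, but it no longer matches reality:
    // the barrier above moved depth_image to TRANSFER_DST_OPTIMAL, so only the mismatch error fires here.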
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011"); m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_GENERAL, &depth_clear_value, 1, &clear_range); m_errorMonitor->VerifyFound(); // Now cause error due to bad image layout transition in PipelineBarrier VkImageMemoryBarrier image_barrier[1] = {}; image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; image_barrier[0].image = src_image; image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers; image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels; image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01210"); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, image_barrier); m_errorMonitor->VerifyFound(); // Finally some layout errors at RenderPass create time // Just hacking in specific state to get to the errors we want so don't copy this unless you know what you're doing. VkAttachmentReference attach = {}; // perf warning for GENERAL layout w/ non-DS input attachment attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.inputAttachmentCount = 1; subpass.pInputAttachments = &attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-general layout attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for input attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be READ_ONLY_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); subpass.inputAttachmentCount = 0; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach; attach.layout = VK_IMAGE_LAYOUT_GENERAL; // perf warning for GENERAL layout on color attachment m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); // error w/ non-color opt or GENERAL layout for color attachment attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Layout for color attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL."); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); 
    // error w/ non-color opt or GENERAL layout for color attachment
    attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "Layout for color attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.");
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    m_errorMonitor->VerifyFound();
    subpass.colorAttachmentCount = 0;
    subpass.pDepthStencilAttachment = &attach;
    attach.layout = VK_IMAGE_LAYOUT_GENERAL;
    // perf warning for GENERAL layout on DS attachment
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                         "GENERAL layout for depth attachment may not give optimal performance.");
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    m_errorMonitor->VerifyFound();
    // error w/ non-ds opt or GENERAL layout for depth attachment
    attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Layout for depth attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be "
                                         "DEPTH_STENCIL_ATTACHMENT_OPTIMAL, DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.");
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    m_errorMonitor->VerifyFound();
    // For this error we need a valid renderpass so create default one
    attach.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    attach.attachment = 0;
    attach_desc.format = depth_format;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    // Can't do a CLEAR load on READ_ONLY initialLayout
    attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "with invalid first layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL");
    vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    m_errorMonitor->VerifyFound();

    vkFreeMemory(m_device->device(), src_image_mem, NULL);
    vkFreeMemory(m_device->device(), dst_image_mem, NULL);
    vkFreeMemory(m_device->device(), depth_image_mem, NULL);
    vkDestroyImage(m_device->device(), src_image, NULL);
    vkDestroyImage(m_device->device(), dst_image, NULL);
    vkDestroyImage(m_device->device(), depth_image, NULL);
}

TEST_F(VkLayerTest, InvalidStorageImageLayout) {
    TEST_DESCRIPTION("Attempt to update a STORAGE_IMAGE descriptor w/o GENERAL layout.");

    ASSERT_NO_FATAL_FAILURE(Init());

    const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
    VkImageTiling tiling;
    VkFormatProperties format_properties;
    vkGetPhysicalDeviceFormatProperties(gpu(), tex_format, &format_properties);
    if (format_properties.linearTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) {
        tiling = VK_IMAGE_TILING_LINEAR;
    } else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) {
        tiling = VK_IMAGE_TILING_OPTIMAL;
    } else {
        printf("%s Device does not support VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT; skipped.\n", kSkipPrefix);
        return;
    }

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    VkImageObj image(m_device);
    image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_STORAGE_BIT, tiling, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView view = image.targetView(tex_format);

    VkDescriptorImageInfo image_info = {};
    image_info.imageView = view;
    image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
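    // Note (sketch, not executed): the only layout the spec permits for a
    // STORAGE_IMAGE descriptor is VK_IMAGE_LAYOUT_GENERAL, so the passing form of
    // this update would simply be
    //   image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;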
    descriptor_write.pImageInfo = &image_info;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout "
                                         "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but according to spec ");
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, NonSimultaneousSecondaryMarksPrimary) {
    ASSERT_NO_FATAL_FAILURE(Init());
    const char *simultaneous_use_message =
        "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer";

    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);

    secondary.begin();
    secondary.end();

    VkCommandBufferBeginInfo cbbi = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        nullptr,
        VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
        nullptr,
    };
    m_commandBuffer->begin(&cbbi);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, simultaneous_use_message);
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, SimultaneousUseSecondaryTwoExecutes) {
    ASSERT_NO_FATAL_FAILURE(Init());

    const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!";

    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);

    VkCommandBufferInheritanceInfo inh = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
        nullptr,
    };
    VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh};

    secondary.begin(&cbbi);
    secondary.end();

    m_commandBuffer->begin();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, SimultaneousUseSecondarySingleExecute) {
    ASSERT_NO_FATAL_FAILURE(Init());

    // variation on previous test executing the same CB twice in the same
    // CmdExecuteCommands call
    const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!";

    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);

    VkCommandBufferInheritanceInfo inh = {
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
        nullptr,
    };
    VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh};

    secondary.begin(&cbbi);
    secondary.end();

    m_commandBuffer->begin();
    VkCommandBuffer cbs[] = {secondary.handle(), secondary.handle()};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
    vkCmdExecuteCommands(m_commandBuffer->handle(), 2, cbs);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}
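// Minimal sketch of the passing pattern the three tests above guard against
// (illustration only; nothing in this file calls it): a command buffer that may be
// pending execution more than once must be begun with the SIMULTANEOUS_USE flag.
static void BeginAllowingSimultaneousUse(VkCommandBuffer cb, const VkCommandBufferInheritanceInfo *inheritance) {
    VkCommandBufferBeginInfo begin_info = {};
    begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    begin_info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
    begin_info.pInheritanceInfo = inheritance;  // required when cb is a secondary command buffer
    vkBeginCommandBuffer(cb, &begin_info);
}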
TEST_F(VkLayerTest, SimultaneousUseOneShot) {
    TEST_DESCRIPTION("Submit the same command buffer twice in one submit looking for simultaneous use and one time submit errors");
    const char *simultaneous_use_message = "is already in use and is not marked for simultaneous use";
    const char *one_shot_message = "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted";
    ASSERT_NO_FATAL_FAILURE(Init());

    VkCommandBuffer cmd_bufs[2];
    VkCommandBufferAllocateInfo alloc_info;
    alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.commandBufferCount = 2;
    alloc_info.commandPool = m_commandPool->handle();
    alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs);

    VkCommandBufferBeginInfo cb_binfo;
    cb_binfo.pNext = NULL;
    cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    cb_binfo.pInheritanceInfo = VK_NULL_HANDLE;
    cb_binfo.flags = 0;
    vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo);
    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(cmd_bufs[0], 0, 1, &viewport);
    vkEndCommandBuffer(cmd_bufs[0]);
    VkCommandBuffer duplicates[2] = {cmd_bufs[0], cmd_bufs[0]};

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 2;
    submit_info.pCommandBuffers = duplicates;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);

    // Set one time use and now look for one time submit
    duplicates[0] = duplicates[1] = cmd_bufs[1];
    cb_binfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo);
    vkCmdSetViewport(cmd_bufs[1], 0, 1, &viewport);
    vkEndCommandBuffer(cmd_bufs[1]);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, one_shot_message);
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
}

TEST_F(VkLayerTest, StageMaskGsTsEnabled) {
    TEST_DESCRIPTION(
        "Attempt to use a stageMask w/ geometry shader and tessellation shader bits enabled when those features are disabled on "
        "the device.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Make sure gs & ts are disabled
    features.geometryShader = false;
    features.tessellationShader = false;
    // The sacrificial device object
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features);

    VkCommandPoolCreateInfo pool_create_info{};
    pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;

    VkCommandPool command_pool;
    vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);

    VkCommandBufferAllocateInfo cmd = {};
    cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    cmd.pNext = NULL;
    cmd.commandPool = command_pool;
    cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    cmd.commandBufferCount = 1;

    VkCommandBuffer cmd_buffer;
    VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
    ASSERT_VK_SUCCESS(err);

    VkEvent event;
    VkEventCreateInfo evci = {};
    evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    VkResult result = vkCreateEvent(test_device.handle(), &evci, NULL, &event);
    ASSERT_VK_SUCCESS(result);

    VkCommandBufferBeginInfo cbbi = {};
    cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    vkBeginCommandBuffer(cmd_buffer, &cbbi);
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01150");
    vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01151");
    vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
    m_errorMonitor->VerifyFound();
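    // Sketch of a mask that stays valid with these features disabled (not
    // executed): restrict stageMask to stages every device supports, e.g.
    //   vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT);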
    vkDestroyEvent(test_device.handle(), event, NULL);
    vkDestroyCommandPool(test_device.handle(), command_pool, NULL);
}

TEST_F(VkLayerTest, EventInUseDestroyedSignaled) {
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_commandBuffer->begin();

    VkEvent event;
    VkEventCreateInfo event_create_info = {};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
    vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);

    m_commandBuffer->end();
    vkDestroyEvent(m_device->device(), event, nullptr);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound");
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InUseDestroyedSignaled) {
    TEST_DESCRIPTION(
        "Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. "
        "Call VkQueueSubmit with an event that has been deleted.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_errorMonitor->ExpectSuccess();

    VkSemaphoreCreateInfo semaphore_create_info = {};
    semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    VkSemaphore semaphore;
    ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
    VkFenceCreateInfo fence_create_info = {};
    fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    VkFence fence;
    ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence));

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                     });

    VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
    VkDescriptorBufferInfo buffer_info = {};
    buffer_info.buffer = buffer_test.GetBuffer();
    buffer_info.offset = 0;
    buffer_info.range = 1024;

    VkWriteDescriptorSet write_descriptor_set = {};
    write_descriptor_set.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write_descriptor_set.dstSet = ds.set_;
    write_descriptor_set.descriptorCount = 1;
    write_descriptor_set.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write_descriptor_set.pBufferInfo = &buffer_info;
    vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor_set, 0, nullptr);

    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
    pipe.CreateVKPipeline(pipeline_layout.handle(), m_renderPass);

    VkEvent event;
    VkEventCreateInfo event_create_info = {};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);

    m_commandBuffer->begin();
    vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            0, NULL);
    m_commandBuffer->end();
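    // Sketch of the safe teardown ordering being tested below (illustration only):
    // the submission must retire before any object it references may be destroyed,
    //   vkQueueWaitIdle(m_device->m_queue);
    //   vkDestroyEvent(m_device->device(), event, nullptr);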
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    submit_info.signalSemaphoreCount = 1;
    submit_info.pSignalSemaphores = &semaphore;
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
    m_errorMonitor->Reset();  // resume logmsg processing

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyEvent-event-01145");
    vkDestroyEvent(m_device->device(), event, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySemaphore-semaphore-01137");
    vkDestroySemaphore(m_device->device(), semaphore, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fence 0x");
    vkDestroyFence(m_device->device(), fence, nullptr);
    m_errorMonitor->VerifyFound();

    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj");
    vkDestroySemaphore(m_device->device(), semaphore, nullptr);
    m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj");
    vkDestroyFence(m_device->device(), fence, nullptr);
    m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Event obj");
    vkDestroyEvent(m_device->device(), event, nullptr);
}

TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use query pool.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkQueryPool query_pool;
    VkQueryPoolCreateInfo query_pool_ci{};
    query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
    query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
    query_pool_ci.queryCount = 1;
    vkCreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);

    m_commandBuffer->begin();
    // Reset query pool to create binding with cmd buffer
    vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetQueryPoolResults-queryType-00818");
    uint32_t data_space[16];
    m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool");
    vkGetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t),
                          VK_QUERY_RESULT_PARTIAL_BIT);
    m_errorMonitor->VerifyFound();

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    // Submit cmd buffer and then destroy query pool while in-flight
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyQueryPool-queryPool-00793");
    vkDestroyQueryPool(m_device->handle(), query_pool, NULL);
    m_errorMonitor->VerifyFound();

    vkQueueWaitIdle(m_device->m_queue);
    // Now that cmd buffer done we can safely destroy query_pool
    m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj");
    vkDestroyQueryPool(m_device->handle(), query_pool, NULL);
}

TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use pipeline.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const VkPipelineLayoutObj pipeline_layout(m_device);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyPipeline-pipeline-00765");
    // Create PSO to be used for draw-time errors below
    VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    // Store pipeline handle so we can actually delete it before test finishes
    VkPipeline delete_this_pipeline;
    {  // Scope pipeline so it will be auto-deleted
        VkPipelineObj pipe(m_device);
        pipe.AddShader(&vs);
        pipe.AddShader(&fs);
        pipe.AddDefaultColorAttachment();
        pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
        delete_this_pipeline = pipe.handle();

        m_commandBuffer->begin();
        // Bind pipeline to cmd buffer
        vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
        m_commandBuffer->end();

        VkSubmitInfo submit_info = {};
        submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submit_info.commandBufferCount = 1;
        submit_info.pCommandBuffers = &m_commandBuffer->handle();
        // Submit cmd buffer and then pipeline destroyed while in-flight
        vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    }  // Pipeline deletion triggered here
    m_errorMonitor->VerifyFound();
    // Make sure queue finished and then actually delete pipeline
    vkQueueWaitIdle(m_device->m_queue);
    m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj");
    vkDestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr);
}

TEST_F(VkLayerTest, CreateImageViewBreaksParameterCompatibilityRequirements) {
    TEST_DESCRIPTION(
        "Attempts to create an Image View with a view type that does not match the image type it is being created from.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkPhysicalDeviceMemoryProperties memProps;
    vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memProps);

    // Test mismatch detection for image of type VK_IMAGE_TYPE_1D
    VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                 nullptr,
                                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                 VK_IMAGE_TYPE_1D,
                                 VK_FORMAT_R8G8B8A8_UNORM,
                                 {1, 1, 1},
                                 1,
                                 1,
                                 VK_SAMPLE_COUNT_1_BIT,
                                 VK_IMAGE_TILING_OPTIMAL,
                                 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                 VK_SHARING_MODE_EXCLUSIVE,
                                 0,
                                 nullptr,
                                 VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image1D(m_device);
    image1D.init(&imgInfo);
    ASSERT_TRUE(image1D.initialized());

    // Initialize VkImageViewCreateInfo with mismatched viewType
    VkImageView imageView;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image1D.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_2D is not compatible with image");
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Test mismatch detection for image of type VK_IMAGE_TYPE_2D
    imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
               nullptr,
               VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
               VK_IMAGE_TYPE_2D,
               VK_FORMAT_R8G8B8A8_UNORM,
               {1, 1, 1},
               1,
               6,
               VK_SAMPLE_COUNT_1_BIT,
               VK_IMAGE_TILING_OPTIMAL,
               VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
               VK_SHARING_MODE_EXCLUSIVE,
               0,
               nullptr,
               VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image2D(m_device);
    image2D.init(&imgInfo);
    ASSERT_TRUE(image2D.initialized());

    // Initialize VkImageViewCreateInfo with mismatched viewType
    ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image2D.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_3D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_3D is not compatible with image");
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Change VkImageViewCreateInfo to different mismatched viewType
    ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
    ivci.subresourceRange.layerCount = 6;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003");
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Test mismatch detection for image of type VK_IMAGE_TYPE_3D
    imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
               nullptr,
               VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
               VK_IMAGE_TYPE_3D,
               VK_FORMAT_R8G8B8A8_UNORM,
               {1, 1, 1},
               1,
               1,
               VK_SAMPLE_COUNT_1_BIT,
               VK_IMAGE_TILING_OPTIMAL,
               VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
               VK_SHARING_MODE_EXCLUSIVE,
               0,
               nullptr,
               VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image3D(m_device);
    image3D.init(&imgInfo);
    ASSERT_TRUE(image3D.initialized());

    // Initialize VkImageViewCreateInfo with mismatched viewType
    ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image3D.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_1D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_1D is not compatible with image");
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Change VkImageViewCreateInfo to different mismatched viewType
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;

    // Test for error message
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01005");
    } else {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subResourceRange-01021");
    }
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();
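    // Sketch of the passing variant just rejected (illustration, not executed):
    // with VK_KHR_maintenance1 a 2D view of a 3D image is only legal when the image
    // was created with the 2D-array-compatible flag, i.e.
    //   imgInfo.flags |= VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR;  // at image creation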
    // Check if the device can make the image required for this test case.
    VkImageFormatProperties formProps = {{0, 0, 0}, 0, 0, 0, 0};
    VkResult res = vkGetPhysicalDeviceImageFormatProperties(
        m_device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_3D, VK_IMAGE_TILING_OPTIMAL,
        VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
        VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT,
        &formProps);

    // If not, skip this part of the test.
    if (res || !m_device->phy().features().sparseBinding ||
        !DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
        printf("%s %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        return;
    }

    // Initialize VkImageCreateInfo with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR and VK_IMAGE_CREATE_SPARSE_BINDING_BIT which
    // are incompatible create flags.
    imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
               nullptr,
               VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT,
               VK_IMAGE_TYPE_3D,
               VK_FORMAT_R8G8B8A8_UNORM,
               {1, 1, 1},
               1,
               1,
               VK_SAMPLE_COUNT_1_BIT,
               VK_IMAGE_TILING_OPTIMAL,
               VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
               VK_SHARING_MODE_EXCLUSIVE,
               0,
               nullptr,
               VK_IMAGE_LAYOUT_UNDEFINED};
    VkImage imageSparse;

    // Creating a sparse image means we should not bind memory to it.
    res = vkCreateImage(m_device->device(), &imgInfo, NULL, &imageSparse);
    ASSERT_FALSE(res);

    // Initialize VkImageViewCreateInfo to create a view that will attempt to utilize VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR.
    ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = imageSparse;
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         " when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
                                         "VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.");
    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Clean up
    vkDestroyImage(m_device->device(), imageSparse, nullptr);
}

TEST_F(VkLayerTest, CreateImageViewFormatFeatureMismatch) {
    TEST_DESCRIPTION("Create view with a format that does not have the same features as the image format.");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());

    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;

    // Load required functions
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Failed to load device profile layer.\n", kSkipPrefix);
        return;
    }

    // List of features to be tested
    VkFormatFeatureFlagBits features[] = {VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT,
                                          VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT};
    uint32_t feature_count = 4;
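    // The usage flags below pair one-to-one with the format features above
    // (SAMPLED <-> SAMPLED_IMAGE, STORAGE <-> STORAGE_IMAGE, and so on), so
    // usages[i] always exercises features[i].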
    // List of usage cases for each feature test
    VkImageUsageFlags usages[] = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                  VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
    // List of errors that will be thrown in order of tests run
    std::string optimal_error_codes[] = {
        "VUID-VkImageViewCreateInfo-image-01013",
        "VUID-VkImageViewCreateInfo-image-01014",
        "VUID-VkImageViewCreateInfo-image-01015",
        "VUID-VkImageViewCreateInfo-image-01016",
    };

    VkFormatProperties formatProps;

    // First three tests
    uint32_t i = 0;
    for (i = 0; i < (feature_count - 1); i++) {
        // Modify formats to have mismatched features

        // Format for image
        fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps);
        formatProps.optimalTilingFeatures |= features[i];
        fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps);

        memset(&formatProps, 0, sizeof(formatProps));

        // Format for view
        fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps);
        formatProps.optimalTilingFeatures = features[(i + 1) % feature_count];
        fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps);

        // Create image with modified format
        VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                     nullptr,
                                     VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                     VK_IMAGE_TYPE_2D,
                                     VK_FORMAT_R32G32B32A32_UINT,
                                     {1, 1, 1},
                                     1,
                                     1,
                                     VK_SAMPLE_COUNT_1_BIT,
                                     VK_IMAGE_TILING_OPTIMAL,
                                     usages[i],
                                     VK_SHARING_MODE_EXCLUSIVE,
                                     0,
                                     nullptr,
                                     VK_IMAGE_LAYOUT_UNDEFINED};
        VkImageObj image(m_device);
        image.init(&imgInfo);
        ASSERT_TRUE(image.initialized());
        VkImageView imageView;

        // Initialize VkImageViewCreateInfo with modified format
        VkImageViewCreateInfo ivci = {};
        ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
        ivci.image = image.handle();
        ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
        ivci.format = VK_FORMAT_R32G32B32A32_SINT;
        ivci.subresourceRange.layerCount = 1;
        ivci.subresourceRange.baseMipLevel = 0;
        ivci.subresourceRange.levelCount = 1;
        ivci.subresourceRange.baseArrayLayer = 0;
        ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

        // Test for error message
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]);
        VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
        m_errorMonitor->VerifyFound();

        if (!res) {
            vkDestroyImageView(m_device->device(), imageView, nullptr);
        }
    }

    // Test for VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT. Needs special formats.

    // Only run this test if format supported
    if (!ImageFormatIsSupported(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_TILING_OPTIMAL)) {
        printf("%s VK_FORMAT_D24_UNORM_S8_UINT format not supported - skipped.\n", kSkipPrefix);
        return;
    }

    // Modify formats to have mismatched features

    // Format for image
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, &formatProps);
    formatProps.optimalTilingFeatures |= features[i];
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, formatProps);

    memset(&formatProps, 0, sizeof(formatProps));

    // Format for view
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &formatProps);
    formatProps.optimalTilingFeatures = features[(i + 1) % feature_count];
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, formatProps);

    // Create image with modified format
    VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                 nullptr,
                                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                 VK_IMAGE_TYPE_2D,
                                 VK_FORMAT_D24_UNORM_S8_UINT,
                                 {1, 1, 1},
                                 1,
                                 1,
                                 VK_SAMPLE_COUNT_1_BIT,
                                 VK_IMAGE_TILING_OPTIMAL,
                                 usages[i],
                                 VK_SHARING_MODE_EXCLUSIVE,
                                 0,
                                 nullptr,
                                 VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image(m_device);
    image.init(&imgInfo);
    ASSERT_TRUE(image.initialized());
    VkImageView imageView;

    // Initialize VkImageViewCreateInfo with modified format
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_D32_SFLOAT_S8_UINT;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;

    // Test for error message
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]);
    VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    if (!res) {
        vkDestroyImageView(m_device->device(), imageView, nullptr);
    }
}

TEST_F(VkLayerTest, InvalidImageViewUsageCreateInfo) {
    TEST_DESCRIPTION("Usage modification via a chained VkImageViewUsageCreateInfo struct");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Test requires DeviceProfileLayer, unavailable - skipped.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
        printf("%s Test requires API >= 1.1 or KHR_MAINTENANCE2 extension, unavailable - skipped.\n", kSkipPrefix);
        return;
    }
    m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitState());

    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;

    // Load required functions
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Required extensions are not available.\n", kSkipPrefix);
        return;
    }

    VkFormatProperties formatProps;

    // Ensure image format claims support for sampled and storage, excludes color attachment
    memset(&formatProps, 0, sizeof(formatProps));
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps);
    formatProps.optimalTilingFeatures |= (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT);
    formatProps.optimalTilingFeatures = formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps);

    // Create image with sampled and storage usages
    VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                 nullptr,
                                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                 VK_IMAGE_TYPE_2D,
                                 VK_FORMAT_R32G32B32A32_UINT,
                                 {1, 1, 1},
                                 1,
                                 1,
                                 VK_SAMPLE_COUNT_1_BIT,
                                 VK_IMAGE_TILING_OPTIMAL,
                                 VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT,
                                 VK_SHARING_MODE_EXCLUSIVE,
                                 0,
                                 nullptr,
                                 VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image(m_device);
    image.init(&imgInfo);
    ASSERT_TRUE(image.initialized());

    // Force the imageview format to exclude storage feature, include color attachment
    memset(&formatProps, 0, sizeof(formatProps));
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps);
    formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
    formatProps.optimalTilingFeatures = (formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT);
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps);

    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R32G32B32A32_SINT;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // ImageView creation should fail because view format doesn't support all the underlying image's usages
    VkImageView imageView;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01014");
    VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();

    // Add a chained VkImageViewUsageCreateInfo to override original image usage bits, removing storage
    VkImageViewUsageCreateInfo usage_ci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, nullptr, VK_IMAGE_USAGE_SAMPLED_BIT};
    // Link the VkImageViewUsageCreateInfo struct into the view's create info pNext chain
    ivci.pNext = &usage_ci;

    // ImageView should now succeed without error
    m_errorMonitor->ExpectSuccess();
    res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyNotFound();
    if (VK_SUCCESS == res) {
        vkDestroyImageView(m_device->device(), imageView, nullptr);
    }

    // Try a zero usage field
    usage_ci.usage = 0;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-requiredbitmask");
    res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == res) {
        vkDestroyImageView(m_device->device(), imageView, nullptr);
    }

    // Try a usage field with a bit not supported by underlying image
    usage_ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-01587");
    res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == res) {
        vkDestroyImageView(m_device->device(), imageView, nullptr);
    }
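    // Note (sketch): a chained VkImageViewUsageCreateInfo may only *narrow* the
    // image's usage, never widen it, so any bit absent from the image's own usage
    // is rejected, as is any value that is not a real VkImageUsageFlagBits bit at
    // all, which the next case exercises.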
    // Try an illegal bit in usage field
    usage_ci.usage = 0x10000000 | VK_IMAGE_USAGE_SAMPLED_BIT;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-parameter");
    res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyFound();
    if (VK_SUCCESS == res) {
        vkDestroyImageView(m_device->device(), imageView, nullptr);
    }
}

TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use imageView.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    VkResult err;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView view;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo image_info{};
    image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    image_info.imageView = view;
    image_info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    descriptor_write.pImageInfo = &image_info;
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    // Create PSO to use the sampler
    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(set=0, binding=0) uniform sampler2D s;\n"
        "layout(location=0) out vec4 x;\n"
        "void main(){\n"
        "   x = texture(s, vec2(1));\n"
        "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImageView-imageView-01026");

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    // Bind pipeline to cmd buffer
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            0, nullptr);

    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);

    m_commandBuffer->Draw(1, 0, 0, 0);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    // Submit cmd buffer and then destroy imageView while in-flight
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    vkDestroyImageView(m_device->device(), view, nullptr);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
    // Now we can actually destroy imageView
    m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj");
    vkDestroyImageView(m_device->device(), view, NULL);
    vkDestroySampler(m_device->device(), sampler, nullptr);
}

TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use bufferView.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    VkBuffer buffer;
    uint32_t queue_family_index = 0;
    VkBufferCreateInfo buffer_create_info = {};
    buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffer_create_info.size = 1024;
    buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
    buffer_create_info.queueFamilyIndexCount = 1;
    buffer_create_info.pQueueFamilyIndices = &queue_family_index;

    VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    VkMemoryRequirements memory_reqs;
    VkDeviceMemory buffer_memory;

    VkMemoryAllocateInfo memory_info = {};
    memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    memory_info.allocationSize = 0;
    memory_info.memoryTypeIndex = 0;

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
    memory_info.allocationSize = memory_reqs.size;
    bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
    ASSERT_TRUE(pass);

    err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
    ASSERT_VK_SUCCESS(err);
    err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
    ASSERT_VK_SUCCESS(err);

    VkBufferView view;
    VkBufferViewCreateInfo bvci = {};
    bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
    bvci.buffer = buffer;
    bvci.format = VK_FORMAT_R32_SFLOAT;
    bvci.range = VK_WHOLE_SIZE;

    err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view);
    ASSERT_VK_SUCCESS(err);

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    descriptor_write.pTexelBufferView = &view;
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
        "layout(location=0) out vec4 x;\n"
        "void main(){\n"
        "   x = imageLoad(s, 0);\n"
        "}\n";
    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
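    // Note (sketch): the r32f imageBuffer binding in the fragment shader above is
    // what ties this pipeline to the STORAGE_TEXEL_BUFFER descriptor, which keeps
    // the bufferView referenced while the draw below is in flight.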
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBufferView-bufferView-00936");

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
    // Bind pipeline to cmd buffer
    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            0, nullptr);
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    // Submit cmd buffer and then destroy bufferView while in-flight
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    vkDestroyBufferView(m_device->device(), view, nullptr);
    m_errorMonitor->VerifyFound();
    vkQueueWaitIdle(m_device->m_queue);
    // Now we can actually destroy bufferView
    m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle");
    m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj");
    vkDestroyBufferView(m_device->device(), view, NULL);
    vkDestroyBuffer(m_device->device(), buffer, NULL);
    vkFreeMemory(m_device->device(), buffer_memory, NULL);
}

TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) {
    TEST_DESCRIPTION("Delete in-use sampler.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    VkResult err;
    err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView view;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorImageInfo image_info{};
    image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    image_info.imageView = view;
    image_info.sampler = sampler;

    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    descriptor_write.pImageInfo = &image_info;
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    // Create PSO to use the sampler
    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "   gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
"layout(set=0, binding=0) uniform sampler2D s;\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = texture(s, vec2(1));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySampler-sampler-01082"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Bind pipeline to cmd buffer vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0, nullptr); VkViewport viewport = {0, 0, 16, 16, 0, 1}; VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); m_commandBuffer->Draw(1, 0, 0, 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); // Submit cmd buffer then destroy sampler VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &m_commandBuffer->handle(); // Submit cmd buffer and then destroy sampler while in-flight vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); vkDestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon m_errorMonitor->VerifyFound(); vkQueueWaitIdle(m_device->m_queue); // Now we can actually destroy sampler m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle"); m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj"); vkDestroySampler(m_device->device(), sampler, NULL); // Destroyed for real vkDestroyImageView(m_device->device(), view, NULL); } TEST_F(VkLayerTest, UpdateDestroyDescriptorSetLayout) { TEST_DESCRIPTION("Attempt updates to descriptor sets with destroyed descriptor set layouts"); // TODO: Update to match the descriptor set layout specific VUIDs/VALIDATION_ERROR_* when present const auto kWriteDestroyedLayout = "VUID-VkWriteDescriptorSet-dstSet-00320"; const auto kCopyDstDestroyedLayout = "VUID-VkCopyDescriptorSet-dstSet-parameter"; const auto kCopySrcDestroyedLayout = "VUID-VkCopyDescriptorSet-srcSet-parameter"; ASSERT_NO_FATAL_FAILURE(Init()); // Set up the descriptor (resource) and write/copy operations to use. 
    // Set up the descriptor (resource) and write/copy operations to use.
    float data[16] = {};
    VkConstantBufferObj buffer(m_device, sizeof(data), data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
    ASSERT_TRUE(buffer.initialized());

    VkDescriptorBufferInfo info = {};
    info.buffer = buffer.handle();
    info.range = VK_WHOLE_SIZE;

    VkWriteDescriptorSet write_descriptor = {};
    write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write_descriptor.dstSet = VK_NULL_HANDLE;  // must update this
    write_descriptor.dstBinding = 0;
    write_descriptor.descriptorCount = 1;
    write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write_descriptor.pBufferInfo = &info;

    VkCopyDescriptorSet copy_descriptor = {};
    copy_descriptor.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
    copy_descriptor.srcSet = VK_NULL_HANDLE;  // must update
    copy_descriptor.srcBinding = 0;
    copy_descriptor.dstSet = VK_NULL_HANDLE;  // must update
    copy_descriptor.dstBinding = 0;
    copy_descriptor.descriptorCount = 1;

    // Create valid and invalid source and destination descriptor sets
    std::vector<VkDescriptorSetLayoutBinding> one_uniform_buffer = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
    };
    OneOffDescriptorSet good_dst(m_device, one_uniform_buffer);
    ASSERT_TRUE(good_dst.Initialized());

    OneOffDescriptorSet bad_dst(m_device, one_uniform_buffer);
    // Must assert before invalidating it below
    ASSERT_TRUE(bad_dst.Initialized());
    bad_dst.layout_ = VkDescriptorSetLayoutObj();

    OneOffDescriptorSet good_src(m_device, one_uniform_buffer);
    ASSERT_TRUE(good_src.Initialized());

    // Put valid data in the good and bad sources, simultaneously doing a positive test on write and copy operations
    m_errorMonitor->ExpectSuccess();
    write_descriptor.dstSet = good_src.set_;
    vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL);
    m_errorMonitor->VerifyNotFound();

    OneOffDescriptorSet bad_src(m_device, one_uniform_buffer);
    ASSERT_TRUE(bad_src.Initialized());

    // to complete our positive testing use copy, where above we used write.
    copy_descriptor.srcSet = good_src.set_;
    copy_descriptor.dstSet = bad_src.set_;
    vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
    bad_src.layout_ = VkDescriptorSetLayoutObj();
    m_errorMonitor->VerifyNotFound();

    // Trigger the three invalid use errors
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kWriteDestroyedLayout);
    write_descriptor.dstSet = bad_dst.set_;
    vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopyDstDestroyedLayout);
    copy_descriptor.dstSet = bad_dst.set_;
    vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopySrcDestroyedLayout);
    copy_descriptor.srcSet = bad_src.set_;
    copy_descriptor.dstSet = good_dst.set_;
    vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, QueueForwardProgressFenceWait) {
    TEST_DESCRIPTION(
        "Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue. Wait on a fence that has "
        "not yet been submitted to a queue.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const char *queue_forward_progress_message = " that has already been signaled but not waited on by queue 0x";
    const char *invalid_fence_wait_message = " which has not been submitted on a Queue or during acquire next image.";

    VkCommandBufferObj cb1(m_device, m_commandPool);
    cb1.begin();
    cb1.end();

    VkSemaphoreCreateInfo semaphore_create_info = {};
    semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    VkSemaphore semaphore;
    ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cb1.handle();
    submit_info.signalSemaphoreCount = 1;
    submit_info.pSignalSemaphores = &semaphore;
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);

    m_commandBuffer->begin();
    m_commandBuffer->end();
    submit_info.pCommandBuffers = &m_commandBuffer->handle();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, queue_forward_progress_message);
    vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();

    VkFenceCreateInfo fence_create_info = {};
    fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
    VkFence fence;
    ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence));

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, invalid_fence_wait_message);
    vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
    m_errorMonitor->VerifyFound();

    vkDeviceWaitIdle(m_device->device());
    vkDestroyFence(m_device->device(), fence, nullptr);
    vkDestroySemaphore(m_device->device(), semaphore, nullptr);
}

TEST_F(VkLayerTest, FramebufferIncompatible) {
    TEST_DESCRIPTION(
        "Bind a secondary command buffer with a framebuffer that does not match the framebuffer for the active renderpass.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // A renderpass with one color attachment.
    VkAttachmentDescription attachment = {0,
                                          VK_FORMAT_B8G8R8A8_UNORM,
                                          VK_SAMPLE_COUNT_1_BIT,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_STORE,
                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                                          VK_ATTACHMENT_STORE_OP_DONT_CARE,
                                          VK_IMAGE_LAYOUT_UNDEFINED,
                                          VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
    VkRenderPass rp;
    VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
    ASSERT_VK_SUCCESS(err);
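    // Sketch of the rule exercised below (illustration): a secondary command buffer
    // that inherits framebuffer A may only be executed inside a render pass instance
    // whose active framebuffer is also A; render-pass compatibility alone is not
    // enough, which is why the framebuffer created next triggers the error.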
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_B8G8R8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {}; cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cbai.commandPool = m_commandPool->handle(); cbai.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY; cbai.commandBufferCount = 1; VkCommandBuffer sec_cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &sec_cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {}; VkCommandBufferInheritanceInfo cbii = {}; cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; cbii.renderPass = renderPass(); cbii.framebuffer = fb; cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cbbi.pNext = NULL; cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT; cbbi.pInheritanceInfo = &cbii; vkBeginCommandBuffer(sec_cb, &cbbi); vkEndCommandBuffer(sec_cb); VkCommandBufferBeginInfo cbbi2 = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi2); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is not the same as the primary command buffer's current active framebuffer "); vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &sec_cb); m_errorMonitor->VerifyFound(); // Cleanup vkCmdEndRenderPass(m_commandBuffer->handle()); vkEndCommandBuffer(m_commandBuffer->handle()); vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkLayerTest, ColorBlendInvalidLogicOp) { TEST_DESCRIPTION("Attempt to use invalid VkPipelineColorBlendStateCreateInfo::logicOp value."); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().logicOp) { printf("%s Device does not support logicOp feature; skipped.\n", kSkipPrefix); return; } const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; helper.cb_ci_.logicOp = static_cast<VkLogicOp>(VK_LOGIC_OP_END_RANGE + 1); // invalid logicOp to be tested }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607"); } TEST_F(VkLayerTest, ColorBlendUnsupportedLogicOp) { TEST_DESCRIPTION("Attempt enabling VkPipelineColorBlendStateCreateInfo::logicOpEnable when logicOp feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_shading_enable = [](CreatePipelineHelper 
&helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; }; CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606"); } TEST_F(VkLayerTest, ColorBlendUnsupportedDualSourceBlend) { TEST_DESCRIPTION("Attempt to use dual-source blending when dualSrcBlend feature is disabled."); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto set_dsb_src_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR; // bad! helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608"); const auto set_dsb_dst_color_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR; // bad helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609"); const auto set_dsb_src_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA; // bad helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_src_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610"); const auto set_dsb_dst_alpha_enable = [](CreatePipelineHelper &helper) { helper.cb_attachments_.blendEnable = VK_TRUE; helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR; helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD; helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; // bad! 
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD; }; CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611"); } #if GTEST_IS_THREADSAFE struct thread_data_struct { VkCommandBuffer commandBuffer; VkDevice device; VkEvent event; bool bailout; }; extern "C" void *AddToCommandBuffer(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 80000; i++) { vkCmdSetEvent(data->commandBuffer, data->event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); if (data->bailout) { break; } } return NULL; } TEST_F(VkLayerTest, ThreadCommandBufferCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Calls AllocateCommandBuffers VkCommandBufferObj commandBuffer(m_device, m_commandPool); commandBuffer.begin(); VkEventCreateInfo event_info; VkEvent event; VkResult err; memset(&event_info, 0, sizeof(event_info)); event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; err = vkCreateEvent(device(), &event_info, NULL, &event); ASSERT_VK_SUCCESS(err); err = vkResetEvent(device(), event); ASSERT_VK_SUCCESS(err); struct thread_data_struct data; data.commandBuffer = commandBuffer.handle(); data.event = event; data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // First do some correct operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Make non-conflicting calls from this thread at the same time. for (int i = 0; i < 80000; i++) { uint32_t count; vkEnumeratePhysicalDevices(instance(), &count, NULL); } test_platform_thread_join(thread, NULL); // Then do some incorrect operations using multiple threads. // Add many entries to command buffer from another thread. test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data); // Add many entries to command buffer from this thread at the same time. 
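// Access to the command buffer is not externally synchronized here: the spawned thread and this
// thread both record vkCmdSetEvent into the same command buffer concurrently, which the threading
// checks report as "THREADING ERROR".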
AddToCommandBuffer(&data); test_platform_thread_join(thread, NULL); commandBuffer.end(); m_errorMonitor->SetBailout(NULL); m_errorMonitor->VerifyFound(); vkDestroyEvent(device(), event, NULL); } #endif // GTEST_IS_THREADSAFE TEST_F(VkLayerTest, InvalidSPIRVCodeSize) { TEST_DESCRIPTION("Test that errors are produced for SPIR-V modules with invalid code sizes"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V header"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderModule module; VkShaderModuleCreateInfo moduleCreateInfo; struct icd_spv_header spv; spv.magic = ICD_SPV_MAGIC; spv.version = ICD_SPV_VERSION; spv.gen_magic = 0; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.pCode = (const uint32_t *)&spv; moduleCreateInfo.codeSize = 4; moduleCreateInfo.flags = 0; vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module); m_errorMonitor->VerifyFound(); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x;\n" "void main(){\n" " gl_Position = vec4(1);\n" " x = 0;\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01376"); std::vector<unsigned int> shader; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, shader); module_create_info.pCode = shader.data(); // Introduce failure by making codeSize a non-multiple of 4 module_create_info.codeSize = shader.size() * sizeof(unsigned int) - 1; module_create_info.flags = 0; vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidSPIRVMagic) { TEST_DESCRIPTION("Test that an error is produced for a SPIR-V module with a bad magic number"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V magic number"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderModule module; VkShaderModuleCreateInfo moduleCreateInfo; struct icd_spv_header spv; spv.magic = (uint32_t)~ICD_SPV_MAGIC; spv.version = ICD_SPV_VERSION; spv.gen_magic = 0; moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; moduleCreateInfo.pNext = NULL; moduleCreateInfo.pCode = (const uint32_t *)&spv; moduleCreateInfo.codeSize = sizeof(spv) + 16; moduleCreateInfo.flags = 0; vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVertexOutputNotConsumed) { TEST_DESCRIPTION("Test that a warning is produced for a vertex output that is not consumed by the fragment stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "not consumed by fragment shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x;\n" "void main(){\n" " gl_Position = vec4(1);\n" " x = 0;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device);
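// The vertex stage writes location 0 but the fragment stage declares no input there, so stage
// linking should emit only the performance warning set above, not an error.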
pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineComplexTypes) { TEST_DESCRIPTION("Smoke test for complex types across VS/FS boundary"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); char const *vsSource = "#version 450\n" "void main() {}"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "struct S { int x; };\n" "layout(location=2) patch out B { S s; } b;\n" "void main() {\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " b.s.x = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "struct S { int x; };\n" "layout(location=2) patch in B { S s; } b;\n" "void main() { gl_Position = vec4(b.s.x); }\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 c;\n" "void main() { c = vec4(1); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecialization) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *bad_specialization_message = "Specialization entry 0 (for constant id 0) references memory outside provided specialization data "; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout (constant_id = 0) const float r = 0.0f;\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(r,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device); VkPipelineViewportStateCreateInfo vp_state_create_info = {}; vp_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO; vp_state_create_info.viewportCount = 1; VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; 
vp_state_create_info.pViewports = &viewport; vp_state_create_info.scissorCount = 1; VkDynamicState scissor_state = VK_DYNAMIC_STATE_SCISSOR; VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info = {}; pipeline_dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO; pipeline_dynamic_state_create_info.dynamicStateCount = 1; pipeline_dynamic_state_create_info.pDynamicStates = &scissor_state; VkPipelineShaderStageCreateInfo shader_stage_create_info[2] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; VkPipelineVertexInputStateCreateInfo vertex_input_create_info = {}; vertex_input_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO; VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info = {}; input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO; input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; VkPipelineRasterizationStateCreateInfo rasterization_state_create_info = {}; rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rasterization_state_create_info.pNext = nullptr; rasterization_state_create_info.lineWidth = 1.0f; rasterization_state_create_info.rasterizerDiscardEnable = true; VkPipelineColorBlendAttachmentState color_blend_attachment_state = {}; color_blend_attachment_state.blendEnable = VK_FALSE; color_blend_attachment_state.colorWriteMask = 0xf; VkPipelineColorBlendStateCreateInfo color_blend_state_create_info = {}; color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO; color_blend_state_create_info.attachmentCount = 1; color_blend_state_create_info.pAttachments = &color_blend_attachment_state; VkGraphicsPipelineCreateInfo graphicspipe_create_info = {}; graphicspipe_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO; graphicspipe_create_info.stageCount = 2; graphicspipe_create_info.pStages = shader_stage_create_info; graphicspipe_create_info.pVertexInputState = &vertex_input_create_info; graphicspipe_create_info.pInputAssemblyState = &input_assembly_create_info; graphicspipe_create_info.pViewportState = &vp_state_create_info; graphicspipe_create_info.pRasterizationState = &rasterization_state_create_info; graphicspipe_create_info.pColorBlendState = &color_blend_state_create_info; graphicspipe_create_info.pDynamicState = &pipeline_dynamic_state_create_info; graphicspipe_create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; graphicspipe_create_info.layout = pipeline_layout.handle(); graphicspipe_create_info.renderPass = renderPass(); VkPipelineCacheCreateInfo pipeline_cache_create_info = {}; pipeline_cache_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO; VkPipelineCache pipelineCache; ASSERT_VK_SUCCESS(vkCreatePipelineCache(m_device->device(), &pipeline_cache_create_info, nullptr, &pipelineCache)); // This structure maps constant ids to data locations. const VkSpecializationMapEntry entry = // id, offset, size {0, 4, sizeof(uint32_t)}; // Challenge core validation by using a bogus offset. 
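// The single map entry covers bytes [4, 8) of the specialization data, but dataSize below is only
// 4 bytes, so the entry ends past the provided buffer.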
uint32_t data = 1; // Set up the info describing spec map and data const VkSpecializationInfo specialization_info = { 1, &entry, 1 * sizeof(float), &data, }; shader_stage_create_info[0].pSpecializationInfo = &specialization_info; VkPipeline pipeline; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_specialization_message); vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &graphicspipe_create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); vkDestroyPipelineCache(m_device->device(), pipelineCache, nullptr); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorTypeMismatch) { TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_type_mismatch_message = "Type mismatch on descriptor slot 0.0 (used as type "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_type_mismatch_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorNotAccessible) { TEST_DESCRIPTION( "Create a pipeline in which a descriptor used by a shader stage does not include that stage in its stageFlags."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *descriptor_not_accessible_message = "Shader uses descriptor slot 0.0 (used as type "; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT /*!*/, nullptr}, }); char const *vsSource = "#version 450\n" "\n" "layout (std140, set = 0, binding = 0) uniform buf {\n" " mat4 mvp;\n" "} ubuf;\n" "void main(){\n" " gl_Position = ubuf.mvp * vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderPushConstantNotAccessible) { TEST_DESCRIPTION( "Create a graphics pipeline in which a push constant range containing a push constant block member is not accessible from " "the current shader stage."); ASSERT_NO_FATAL_FAILURE(Init()); 
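// The vertex shader below declares a push_constant block, but the pipeline layout's only push
// constant range is flagged VK_SHADER_STAGE_FRAGMENT_BIT, leaving the range inaccessible from the
// vertex stage.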
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *push_constant_not_accessible_message = "Push constant range covering variable starting at offset 0 not accessible from stage VK_SHADER_STAGE_VERTEX_BIT"; char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location = 0) out vec4 uFragColor;\n" "void main(){\n" " uFragColor = vec4(0,1,0,1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set up a push constant range VkPushConstantRange push_constant_range = {}; // Set to the wrong stage to challenge core_validation push_constant_range.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; push_constant_range.size = 4; const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range}); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, push_constant_not_accessible_message); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineCheckShaderNotEnabled) { TEST_DESCRIPTION( "Create a graphics pipeline in which a capability declared by the shader requires a feature not enabled on the device."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *feature_not_enabled_message = "Shader requires VkPhysicalDeviceFeatures::shaderFloat64 but is not enabled on the device"; // Some awkward steps are required to test with custom device features. std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Disable support for 64 bit floats features.shaderFloat64 = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " dvec4 green = vec4(0.0, 1.0, 0.0, 1.0);\n" " color = vec4(green);\n" "}\n"; VkShaderObj vs(&test_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkRenderpassObj render_pass(&test_device); VkPipelineObj pipe(&test_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); const VkPipelineLayoutObj pipeline_layout(&test_device); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, feature_not_enabled_message); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateShaderModuleCheckBadCapability) { TEST_DESCRIPTION("Create a shader in which a capability declared by the shader is not supported."); // Note that this failure message comes from spirv-tools, specifically the validator. 
ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(xfb_buffer = 1) out;\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Capability TransformFeedback is not allowed by Vulkan"); std::vector<unsigned int> spv; VkShaderModuleCreateInfo module_create_info; VkShaderModule shader_module; module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO; module_create_info.pNext = NULL; this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, spv); module_create_info.pCode = spv.data(); module_create_info.codeSize = spv.size() * sizeof(unsigned int); module_create_info.flags = 0; vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader which uses a non-unique capability ID extension, 1 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) { // This is a positive test, no errors expected // Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID TEST_DESCRIPTION("Create a shader which uses a non-unique capability ID extension, 2 of 2"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) { printf("%s Extension %s not supported, skipping this pass. 
\n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); // These tests require that the device support multiViewport if (!m_device->phy().features().multiViewport) { printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Vertex shader using viewport array capability char const *vsSource = "#version 450\n" "#extension GL_ARB_shader_viewport_layer_array : enable\n" "void main() {\n" " gl_ViewportIndex = 1;\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); const VkPipelineLayoutObj pipe_layout(m_device, {}); m_errorMonitor->ExpectSuccess(); pipe.CreateVKPipeline(pipe_layout.handle(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader input which is not present in the outputs of the previous stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvidedInBlock) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader input within an interace block, which is not present in the outputs " "of the previous stage."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchArraySize) { TEST_DESCRIPTION("Test that an error is produced for mismatched array sizes across the vertex->fragment shader interface"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0.0: 'ptr 
to output arr[2] of float32' vs 'ptr to input arr[1] of float32'"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out float x[2];\n" "void main(){\n" " x[0] = 0; x[1] = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x[1];\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x[0]);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for mismatched types across the vertex->fragment shader interface"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) out int x;\n" "void main(){\n" " x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" /* VS writes int */ "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchInBlock) { TEST_DESCRIPTION( "Test that an error is produced for mismatched types across the vertex->fragment shader interface, when the variable is " "contained within an interface block"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0) int x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" /* VS writes int */ "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByLocation) { TEST_DESCRIPTION( "Test that an error is produced for location mismatches across the vertex->fragment shader 
interface; This should manifest " "as a not-written/not-consumed pair, but flushes out broken walking of the interfaces"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.0 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=1) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByComponent) { TEST_DESCRIPTION( "Test that an error is produced for component mismatches across the vertex->fragment shader interface. It's not enough to " "have the same set of locations in use; matching is defined in terms of spirv variables."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.1 which is not written by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "out block { layout(location=0, component=0) float x; } outs;\n" "void main(){\n" " outs.x = 0;\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "in block { layout(location=0, component=1) float x; } ins;\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(ins.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecision) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out mediump float x;\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "layout(location=0) in highp float x;\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), 
renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecisionBlock) { TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "out block { layout(location=0) mediump float x; };\n" "void main() { gl_Position = vec4(0); x = 1.0; }\n"; char const *fsSource = "#version 450\n" "in block { layout(location=0) highp float x; };\n" "layout(location=0) out vec4 color;\n" "void main() { color = vec4(x); }\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotConsumed) { TEST_DESCRIPTION("Test that a warning is produced for a vertex attribute which is not consumed by the vertex shader"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribLocationMismatch) { TEST_DESCRIPTION( "Test that a warning is produced for a location mismatch on vertex attributes. 
This flushes out bad behavior in the " "interface walker"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=1) in float x;\n" "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); m_errorMonitor->SetUnexpectedError("Vertex shader consumes input at location 1 but not provided"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribNotProvided) { TEST_DESCRIPTION("Test that an error is produced for a vertex shader input which is not provided by a vertex attribute"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Vertex shader consumes input at location 0 but not provided"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" /* not provided */ "void main(){\n" " gl_Position = x;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineAttribTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a mismatch between the fundamental type (float/int/uint) of an attribute and the " "vertex shader input that consumes it"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0 does not match vertex shader input type"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in int x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj 
vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDuplicateStage) { TEST_DESCRIPTION("Test that an error is produced for a pipeline containing multiple shaders for the same stage"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Multiple shaders provided for stage VK_SHADER_STAGE_VERTEX_BIT"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&vs); // intentionally duplicate vertex shader attachment pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineMissingEntrypoint) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "No entrypoint found named `foo`"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this, "foo"); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineDepthStencilRequired) { m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "pDepthStencilState is NULL when rasterization is enabled and subpass uses a depth/stencil attachment"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){ gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); 
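// A render pass whose subpass uses both a color and a depth/stencil attachment is constructed
// next; since the pipeline supplies no pDepthStencilState, creating it against that render pass
// must fail.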
descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkAttachmentDescription attachments[] = { { 0, VK_FORMAT_B8G8R8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }, { 0, VK_FORMAT_D16_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, }, }; VkAttachmentReference refs[] = { {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, }; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &refs[0], nullptr, &refs[1], 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attachments, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineTessPatchDecorationMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a variable output from the TCS without the patch decoration, but consumed in the TES " "with the decoration."); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "is per-vertex in tessellation control shader stage but per-patch in tessellation evaluation shader stage"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) patch in int x;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); 
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineTessErrors) { TEST_DESCRIPTION("Test various errors when creating a graphics pipeline with tessellation stages active."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = 0;\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess control shader without a tess eval shader pipe.AddShader(&tcs); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology pipe.SetInputAssembly(&iasci_bad); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass a tess eval shader without a tess control shader pipe.AddShader(&tes); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } { VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); // Pass patch topology without tessellation shaders m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-topology-00737"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.AddShader(&tcs); pipe.AddShader(&tes); // Pass a NULL pTessellationState (with active tessellation shader stages) 
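// SetTessellation() has not been called on this pipeline yet, so pTessellationState is still null
// even though tessellation stages are now attached.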
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass an invalid pTessellationState (bad sType) VkPipelineTessellationStateCreateInfo tsci_bad = tsci; tsci_bad.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-sType-sType"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); // Pass out-of-range patchControlPoints tsci_bad = tsci; tsci_bad.patchControlPoints = 0; pipe.SetTessellation(&tsci); pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); tsci_bad.patchControlPoints = m_device->props.limits.maxTessellationPatchSize + 1; pipe.SetTessellation(&tsci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetTessellation(&tsci); // Pass an invalid primitive topology VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci; iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; pipe.SetInputAssembly(&iasci_bad); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736"); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); pipe.SetInputAssembly(&iasci); } } TEST_F(VkLayerTest, CreatePipelineAttribBindingConflict) { TEST_DESCRIPTION( "Test that an error is produced for a vertex attribute setup where multiple bindings provide the same location"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Duplicate vertex input binding descriptions for binding 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); /* Two binding descriptions for binding 0 */ VkVertexInputBindingDescription input_bindings[2]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attrib; memset(&input_attrib, 0, sizeof(input_attrib)); input_attrib.format = VK_FORMAT_R32_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in float x;\n" /* attrib provided float */ "void main(){\n" " gl_Position = vec4(x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 2); pipe.AddVertexInputAttribs(&input_attrib, 1); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotWritten) { TEST_DESCRIPTION( "Test that an error is produced for a fragment shader 
        "Test that an error is produced for a fragment shader which does not provide an output for one of the pipeline's color "
        "attachments");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attachment 0 not written by fragment shader");

    ASSERT_NO_FATAL_FAILURE(Init());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    /* set up CB 0, not written */
    pipe.AddDefaultColorAttachment();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyFound();
}

TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) {
    TEST_DESCRIPTION(
        "Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's "
        "write mask is 0.");

    m_errorMonitor->ExpectSuccess();

    ASSERT_NO_FATAL_FAILURE(Init());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    /* set up CB 0, not written, but also masked */
    pipe.AddDefaultColorAttachment(0);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotConsumed) {
    TEST_DESCRIPTION(
        "Test that a warning is produced for a fragment shader which provides a spurious output with no matching attachment");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                         "fragment shader writes to output location 1 with no matching attachment");

    ASSERT_NO_FATAL_FAILURE(Init());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main(){\n"
        " gl_Position = vec4(1);\n"
        "}\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(location=0) out vec4 x;\n"
        "layout(location=1) out vec4 y;\n" /* no matching attachment for this */
        "void main(){\n"
        " x = vec4(1);\n"
        " y = vec4(1);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    /* set up CB 0, not written */
    pipe.AddDefaultColorAttachment();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    /* FS writes CB 1, but we don't configure it */

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputTypeMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a mismatch between the fundamental type of a fragment shader output variable, and the "
        "format of the corresponding attachment");
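    // The default color attachment format is UNORM (see the comment below), i.e. a floating-point fundamental type,
    // so the ivec4 fragment output declared in the shader is a mismatch.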
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not match fragment shader output type"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out ivec4 x;\n" /* not UNORM */ "void main(){\n" " x = ivec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineUniformBlockNotProvided) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming a uniform block which has no corresponding binding in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in pipeline layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n" "void main(){\n" " x = vec4(bar.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelinePushConstantsNotInLayout) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming push constants which are not provided in the pipeline layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "layout(push_constant, std430) uniform foo { float x; } consts;\n" "void main(){\n" " gl_Position = vec4(consts.x);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); /* set up CB 0; type is UNORM by default */ pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* should have generated an error -- no push constant ranges provided! 
*/ m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissing) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentTypeMismatch) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment with a format having a different fundamental " "type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "input attachment 0 format of VK_FORMAT_R8G8B8A8_UINT does not match"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); 
ASSERT_VK_SUCCESS(err); // error here. pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissingArray) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description -- array case"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "consumes input attachment index 0 but not provided in subpass"); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput xs[1];\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(xs[0]);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); // error here. pipe.CreateVKPipeline(pl.handle(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateComputePipelineMissingDescriptor) { TEST_DESCRIPTION( "Test that an error is produced for a compute pipeline consuming a descriptor which is not provided in the pipeline " "layout"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Shader uses descriptor slot 0.0"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " x = vec4(1);\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, CreateComputePipelineDescriptorTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for a pipeline consuming a descriptor-backed resource of a mismatched type"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "but descriptor of type VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"); ASSERT_NO_FATAL_FAILURE(Init()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {binding}); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main() {\n" " x.x = 1.0f;\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); 
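    // The shader declares a storage buffer at set=0, binding=0, but the pipeline layout above provides a combined
    // image sampler at that slot, so compute pipeline creation reports the descriptor type mismatch.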
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkLayerTest, DrawTimeImageViewTypeMismatchWithPipeline) { TEST_DESCRIPTION( "Test that an error is produced when an image view type does not match the dimensionality declared in the shader"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires an image view of type VK_IMAGE_VIEW_TYPE_3D"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "\n" "void main() { gl_Position = vec4(0); }\n"; char const *fsSource = "#version 450\n" "\n" "layout(set=0, binding=0) uniform sampler3D s;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = texture(s, vec3(0));\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkTextureObj texture(m_device, nullptr); VkSamplerObj sampler(m_device); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendSamplerTexture(&sampler, &texture); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); m_commandBuffer->BindDescriptorSet(descriptorSet); VkViewport viewport = {0, 0, 16, 16, 0, 1}; vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport); VkRect2D scissor = {{0, 0}, {16, 16}}; vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor); // error produced here. 
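    // The bound texture is 2D, while the shader declares sampler3D, so the mismatch is reported at draw time.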
    vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, DrawTimeImageMultisampleMismatchWithPipeline) {
    TEST_DESCRIPTION(
        "Test that an error is produced when multisampled images are consumed via single-sample image types in the shader, or "
        "vice versa.");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires bound image to have multiple samples");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource =
        "#version 450\n"
        "\n"
        "void main() { gl_Position = vec4(0); }\n";
    char const *fsSource =
        "#version 450\n"
        "\n"
        "layout(set=0, binding=0) uniform sampler2DMS s;\n"
        "layout(location=0) out vec4 color;\n"
        "void main() {\n"
        " color = texelFetch(s, ivec2(0), 0);\n"
        "}\n";

    VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
    VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();

    VkTextureObj texture(m_device, nullptr);  // THIS LINE CAUSES CRASH ON MALI
    VkSamplerObj sampler(m_device);

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendSamplerTexture(&sampler, &texture);
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
    ASSERT_VK_SUCCESS(err);

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    m_commandBuffer->BindDescriptorSet(descriptorSet);

    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);

    // error produced here.
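    // The bound texture is single-sample, while the shader declares sampler2DMS, so the mismatch is reported at draw time.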
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, AttachmentDescriptionUndefinedFormat) { TEST_DESCRIPTION("Create a render pass with an attachment description format set to VK_FORMAT_UNDEFINED"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "format is VK_FORMAT_UNDEFINED"); VkAttachmentReference color_attach = {}; color_attach.layout = VK_IMAGE_LAYOUT_GENERAL; color_attach.attachment = 0; VkSubpassDescription subpass = {}; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_attach; VkRenderPassCreateInfo rpci = {}; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; rpci.attachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_UNDEFINED; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; rpci.pAttachments = &attach_desc; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; VkRenderPass rp; VkResult result = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); if (result == VK_SUCCESS) { vkDestroyRenderPass(m_device->device(), rp, NULL); } } TEST_F(VkLayerTest, AttachmentDescriptionInvalidFinalLayout) { TEST_DESCRIPTION("VkAttachmentDescription's finalLayout must not be UNDEFINED or PREINITIALIZED"); ASSERT_NO_FATAL_FAILURE(Init()); VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkAttachmentReference attach_ref = {}; attach_ref.attachment = 0; attach_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &attach_ref; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; VkRenderPass rp = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-finalLayout-00843"); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); if (rp != VK_NULL_HANDLE) { vkDestroyRenderPass(m_device->device(), rp, NULL); } attach_desc.finalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-finalLayout-00843"); vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyFound(); if (rp != VK_NULL_HANDLE) { vkDestroyRenderPass(m_device->device(), rp, NULL); } } TEST_F(VkLayerTest, CreateImageViewNoMemoryBoundToImage) { VkResult err; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " used with no memory bound. 
Memory should be bound by calling vkBindImageMemory().");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Create an image and try to create a view with no memory backing the image
    VkImage image;

    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    image_create_info.flags = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
    ASSERT_VK_SUCCESS(err);

    VkImageViewCreateInfo image_view_create_info = {};
    image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    image_view_create_info.image = image;
    image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
    image_view_create_info.format = tex_format;
    image_view_create_info.subresourceRange.layerCount = 1;
    image_view_create_info.subresourceRange.baseMipLevel = 0;
    image_view_create_info.subresourceRange.levelCount = 1;
    image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    VkImageView view;
    err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), image, NULL);
    // If last error is success, it still created the view, so delete it.
    if (err == VK_SUCCESS) {
        vkDestroyImageView(m_device->device(), view, NULL);
    }
}

TEST_F(VkLayerTest, InvalidImageViewAspect) {
    TEST_DESCRIPTION("Create an image and try to create a view with an invalid aspectMask");

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());

    const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
    VkImageObj image(m_device);
    image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_LINEAR, 0);
    ASSERT_TRUE(image.initialized());

    VkImageViewCreateInfo image_view_create_info = {};
    image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    image_view_create_info.image = image.handle();
    image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
    image_view_create_info.format = tex_format;
    image_view_create_info.subresourceRange.baseMipLevel = 0;
    image_view_create_info.subresourceRange.levelCount = 1;
    image_view_create_info.subresourceRange.layerCount = 1;
    // Cause an error by setting an invalid image aspect
    image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;

    VkImageView view;
    vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ExerciseGetImageSubresourceLayout) {
    TEST_DESCRIPTION("Test vkGetImageSubresourceLayout() valid usages");

    ASSERT_NO_FATAL_FAILURE(Init());
    VkSubresourceLayout subres_layout = {};

    // VUID-vkGetImageSubresourceLayout-image-00996: image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
    {
        const VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL;  // ERROR: violates image-00996
        VkImageObj img(m_device);
        img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, tiling);
        ASSERT_TRUE(img.initialized());

        VkImageSubresource subres = {};
        subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        subres.mipLevel = 0;
        subres.arrayLayer = 0;

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-00996");
        vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
        m_errorMonitor->VerifyFound();
    }

    // VUID-vkGetImageSubresourceLayout-aspectMask-00997: the aspectMask member of pSubresource must only have a single bit set
    {
        VkImageObj img(m_device);
        img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
        ASSERT_TRUE(img.initialized());

        VkImageSubresource subres = {};
        subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_METADATA_BIT;  // ERROR: triggers aspectMask-00997
        subres.mipLevel = 0;
        subres.arrayLayer = 0;

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-aspectMask-00997");
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
        vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
        m_errorMonitor->VerifyFound();
    }

    // VUID-vkGetImageSubresourceLayout-mipLevel-01716: mipLevel must be less than the mipLevels specified in VkImageCreateInfo
    // when the image was created
    {
        VkImageObj img(m_device);
        img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
        ASSERT_TRUE(img.initialized());

        VkImageSubresource subres = {};
        subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        subres.mipLevel = 1;  // ERROR: triggers mipLevel-01716
        subres.arrayLayer = 0;

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-mipLevel-01716");
        vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
        m_errorMonitor->VerifyFound();
    }

    // VUID-vkGetImageSubresourceLayout-arrayLayer-01717: arrayLayer must be less than the arrayLayers specified in
    // VkImageCreateInfo when the image was created
    {
        VkImageObj img(m_device);
        img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
        ASSERT_TRUE(img.initialized());

        VkImageSubresource subres = {};
        subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        subres.mipLevel = 0;
        subres.arrayLayer = 1;  // ERROR: triggers arrayLayer-01717

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717");
        vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, CopyImageLayerCountMismatch) {
    TEST_DESCRIPTION(
        "Try to copy between images with the source subresource having a different layerCount than the destination subresource");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Create two images to copy between
    VkImageObj src_image_obj(m_device);
    VkImageObj dst_image_obj(m_device);

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 4;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
    image_create_info.flags = 0;

    src_image_obj.init(&image_create_info);
    ASSERT_TRUE(src_image_obj.initialized());

    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    dst_image_obj.init(&image_create_info);
    ASSERT_TRUE(dst_image_obj.initialized());

    m_commandBuffer->begin();
    VkImageCopy copyRegion;
    copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.srcSubresource.mipLevel = 0;
    copyRegion.srcSubresource.baseArrayLayer = 0;
    copyRegion.srcSubresource.layerCount = 1;
    copyRegion.srcOffset.x = 0;
    copyRegion.srcOffset.y = 0;
    copyRegion.srcOffset.z = 0;
    copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copyRegion.dstSubresource.mipLevel = 0;
    copyRegion.dstSubresource.baseArrayLayer = 0;
    // Introduce failure by forcing the dst layerCount to differ from src
    copyRegion.dstSubresource.layerCount = 3;
    copyRegion.dstOffset.x = 0;
    copyRegion.dstOffset.y = 0;
    copyRegion.dstOffset.z = 0;
    copyRegion.extent.width = 1;
    copyRegion.extent.height = 1;
    copyRegion.extent.depth = 1;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-extent-00140");
    m_commandBuffer->CopyImage(src_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                               &copyRegion);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ImageLayerUnsupportedFormat) {
    TEST_DESCRIPTION("Creating images with unsupported formats");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Create image with unsupported format - expect the VK_FORMAT_UNDEFINED parameter error
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_UNDEFINED;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkCreateImage: VkFormat for image must not be VK_FORMAT_UNDEFINED");

    VkImage image;
    vkCreateImage(m_device->handle(), &image_create_info, NULL, &image);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreateImageViewFormatMismatchUnrelated) {
    TEST_DESCRIPTION("Create an image with a color format, then try to create a depth view of it");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Load required functions
    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT =
        (PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT");
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT =
        (PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(),
                                                                                  "vkGetOriginalPhysicalDeviceFormatPropertiesEXT");

    if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
        return;
    }

    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix);
        return;
    }

    VkFormatProperties formatProps;
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, &formatProps);
    formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, formatProps);

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView imgView;
    VkImageViewCreateInfo imgViewInfo = {};
    imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    imgViewInfo.image = image.handle();
    imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    imgViewInfo.format = depth_format;
    imgViewInfo.subresourceRange.layerCount = 1;
    imgViewInfo.subresourceRange.baseMipLevel = 0;
    imgViewInfo.subresourceRange.levelCount = 1;
    imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Can't use depth format for view into color image - expect a format mismatch error
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.");
    vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreateImageViewNoMutableFormatBit) {
    TEST_DESCRIPTION("Create an image view with a different format, when the image does not have MUTABLE_FORMAT bit");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Couldn't enable device profile layer.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState());

    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;

    // Load required functions
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Required extensions are not present.\n", kSkipPrefix);
        return;
    }

    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkFormatProperties formatProps;

    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, &formatProps);
    formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, formatProps);

    VkImageView imgView;
    VkImageViewCreateInfo imgViewInfo = {};
    imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    imgViewInfo.image = image.handle();
    imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    imgViewInfo.format = VK_FORMAT_B8G8R8A8_UINT;
    imgViewInfo.subresourceRange.layerCount = 1;
    imgViewInfo.subresourceRange.baseMipLevel = 0;
    imgViewInfo.subresourceRange.levelCount = 1;
    imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    // Same compatibility class but no MUTABLE_FORMAT bit - expect VUID-VkImageViewCreateInfo-image-01019
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019");
    vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreateImageViewDifferentClass) {
    TEST_DESCRIPTION("Passing bad parameters to CreateImageView");

    ASSERT_NO_FATAL_FAILURE(Init());

    if (!(m_device->format_properties(VK_FORMAT_R8_UINT).optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
        printf("%s Device does not support R8_UINT as color attachment; skipped.\n", kSkipPrefix);
        return;
    }

    VkImageCreateInfo mutImgInfo =
        {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
         nullptr,
         VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
         VK_IMAGE_TYPE_2D,
         VK_FORMAT_R8_UINT,
         {128, 128, 1},
         1,
         1,
         VK_SAMPLE_COUNT_1_BIT,
         VK_IMAGE_TILING_OPTIMAL,
         VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
         VK_SHARING_MODE_EXCLUSIVE,
         0,
         nullptr,
         VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj mutImage(m_device);
    mutImage.init(&mutImgInfo);
    ASSERT_TRUE(mutImage.initialized());

    VkImageView imgView;
    VkImageViewCreateInfo imgViewInfo = {};
    imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
    imgViewInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
    imgViewInfo.subresourceRange.layerCount = 1;
    imgViewInfo.subresourceRange.baseMipLevel = 0;
    imgViewInfo.subresourceRange.levelCount = 1;
    imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    imgViewInfo.image = mutImage.handle();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01018");
    vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MultiplaneIncompatibleViewFormat) {
    TEST_DESCRIPTION("Positive/negative tests of multiplane imageview format compatibility");

    // Enable KHR multiplane req'd extensions
    bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                                    VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
    if (mp_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
    mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    if (mp_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
    } else {
        printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkImageCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    ci.pNext = NULL;
    ci.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
    ci.imageType = VK_IMAGE_TYPE_2D;
    ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
    ci.tiling = VK_IMAGE_TILING_OPTIMAL;
    ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
    ci.extent = {128, 128, 1};
    ci.mipLevels = 1;
    ci.arrayLayers = 1;
    ci.samples = VK_SAMPLE_COUNT_1_BIT;
    ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;

    // Verify format
    VkFormatFeatureFlags features = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
    bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
    if (!supported) {
        printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; } VkImageObj image_obj(m_device); image_obj.init(&ci); ASSERT_TRUE(image_obj.initialized()); VkImageViewCreateInfo ivci = {}; ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; ivci.image = image_obj.image(); ivci.viewType = VK_IMAGE_VIEW_TYPE_2D; ivci.format = VK_FORMAT_R8_SNORM; // Compat is VK_FORMAT_R8_UNORM ivci.subresourceRange.layerCount = 1; ivci.subresourceRange.baseMipLevel = 0; ivci.subresourceRange.levelCount = 1; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT; // Incompatible format error VkImageView imageView = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01586"); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Correct format succeeds ivci.format = VK_FORMAT_R8_UNORM; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed imageView = VK_NULL_HANDLE; // Try a multiplane imageview ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM; ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->ExpectSuccess(); vkCreateImageView(m_device->device(), &ivci, NULL, &imageView); m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed } TEST_F(VkLayerTest, CreateImageViewInvalidSubresourceRange) { TEST_DESCRIPTION("Passing bad image subrange to CreateImageView"); ASSERT_NO_FATAL_FAILURE(Init()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); VkImageView img_view; VkImageViewCreateInfo img_view_info_template = {}; img_view_info_template.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO; img_view_info_template.image = image.handle(); img_view_info_template.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY; img_view_info_template.format = image.format(); // subresourceRange to be filled later for the purposes of this test img_view_info_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_view_info_template.subresourceRange.baseMipLevel = 0; img_view_info_template.subresourceRange.levelCount = 0; img_view_info_template.subresourceRange.baseArrayLayer = 0; img_view_info_template.subresourceRange.layerCount = 0; // Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1}; VkImageViewCreateInfo img_view_info = img_view_info_template; img_view_info.subresourceRange = range; vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } // Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718"); const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1}; 
        VkImageViewCreateInfo img_view_info = img_view_info_template;
        img_view_info.subresourceRange = range;
        vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
        m_errorMonitor->VerifyFound();
    }

    // Try levelCount = 0
    {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718");
        const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1};
        VkImageViewCreateInfo img_view_info = img_view_info_template;
        img_view_info.subresourceRange = range;
        vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
        m_errorMonitor->VerifyFound();
    }

    // Try baseMipLevel + levelCount > image.mipLevels
    {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718");
        const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};
        VkImageViewCreateInfo img_view_info = img_view_info_template;
        img_view_info.subresourceRange = range;
        vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
        m_errorMonitor->VerifyFound();
    }

    // These checks require that the Maintenance1 extension not be enabled, so they are only valid on Vulkan 1.0 devices
    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        // Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-VkImageViewCreateInfo-subresourceRange-01480");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
            VkImageViewCreateInfo img_view_info = img_view_info_template;
            img_view_info.subresourceRange = range;
            vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
            m_errorMonitor->VerifyFound();
        }

        // Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-VkImageViewCreateInfo-subresourceRange-01480");
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-VkImageViewCreateInfo-subresourceRange-01719");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1};
            VkImageViewCreateInfo img_view_info = img_view_info_template;
            img_view_info.subresourceRange = range;
            vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
            m_errorMonitor->VerifyFound();
        }

        // Try layerCount = 0
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "vkCreateImageView: if pCreateInfo->viewType is VK_IMAGE_TYPE_2D_ARRAY, "
                                                 "pCreateInfo->subresourceRange.layerCount must be >= 1");
            // TODO: The test environment aborts the Vulkan call in parameter_validation layer before
            //       the "VUID-VkImageViewCreateInfo-subresourceRange-01719" test
            // m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
            //                                      "VUID-VkImageViewCreateInfo-subresourceRange-01719");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0};
            VkImageViewCreateInfo img_view_info = img_view_info_template;
            img_view_info.subresourceRange = range;
            vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
            m_errorMonitor->VerifyFound();
        }

        // Try baseArrayLayer + layerCount > image.arrayLayers
        {
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-VkImageViewCreateInfo-subresourceRange-01719");
            const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2};
            VkImageViewCreateInfo img_view_info = img_view_info_template;
            img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view); m_errorMonitor->VerifyFound(); } } } TEST_F(VkLayerTest, CompressedImageMipCopyTests) { TEST_DESCRIPTION("Image/Buffer copies for higher mip levels"); ASSERT_NO_FATAL_FAILURE(Init()); VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); VkFormat compressed_format = VK_FORMAT_UNDEFINED; if (device_features.textureCompressionBC) { compressed_format = VK_FORMAT_BC3_SRGB_BLOCK; } else if (device_features.textureCompressionETC2) { compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; } else if (device_features.textureCompressionASTC_LDR) { compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK; } else { printf("%s No compressed formats supported - CompressedImageMipCopyTests skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = compressed_format; ci.extent = {32, 32, 1}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image(m_device); image.init(&ci); ASSERT_TRUE(image.initialized()); VkImageObj odd_image(m_device); ci.extent = {31, 32, 1}; // Mips are [31,32] [15,16] [7,8] [3,4], [1,2] [1,1] odd_image.init(&ci); ASSERT_TRUE(odd_image.initialized()); // Allocate buffers VkMemoryPropertyFlags reqs = 0; VkBufferObj buffer_1024, buffer_64, buffer_16, buffer_8; buffer_1024.init_as_src_and_dst(*m_device, 1024, reqs); buffer_64.init_as_src_and_dst(*m_device, 64, reqs); buffer_16.init_as_src_and_dst(*m_device, 16, reqs); buffer_8.init_as_src_and_dst(*m_device, 8, reqs); VkBufferImageCopy region = {}; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageOffset = {0, 0, 0}; region.bufferOffset = 0; // start recording m_commandBuffer->begin(); // Mip level copies that work - 5 levels m_errorMonitor->ExpectSuccess(); // Mip 0 should fit in 1k buffer - 1k texels @ 1b each region.imageExtent = {32, 32, 1}; region.imageSubresource.mipLevel = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_1024.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_1024.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 2 should fit in 64b buffer - 64 texels @ 1b each region.imageExtent = {8, 8, 1}; region.imageSubresource.mipLevel = 2; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 3 should fit in 16b buffer - 16 texels @ 1b each region.imageExtent = {4, 4, 1}; region.imageSubresource.mipLevel = 3; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); // Mip 4&5 should fit in 16b buffer with no complaint - 4 & 1 texels @ 1b each region.imageExtent = {2, 2, 
                          1};
    region.imageSubresource.mipLevel = 4;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    region.imageExtent = {1, 1, 1};
    region.imageSubresource.mipLevel = 5;
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Buffer must accommodate a full compressed block, regardless of texel count
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_8.handle(), 1, &region);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171");
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_8.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Copy width < compressed block size, but not the full mip width
    region.imageExtent = {1, 2, 1};
    region.imageSubresource.mipLevel = 4;
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00207");  // width not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00207");  // width not a multiple of compressed block width
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Copy height < compressed block size but not the full mip height
    region.imageExtent = {2, 1, 1};
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00208");  // height not a multiple of compressed block height
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00208");  // height not a multiple of compressed block height
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Offsets must be multiple of compressed block size
    region.imageOffset = {1, 1, 0};
    region.imageExtent = {1, 1, 1};
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageOffset-00205");  // imageOffset not a multiple of block size
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkBufferImageCopy-imageOffset-00205");  // imageOffset not a multiple of block size
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();

    // Offset + extent width = mip width - should succeed
    region.imageOffset = {4, 4, 0};
    region.imageExtent = {3, 4, 1};
    region.imageSubresource.mipLevel = 2;
    m_errorMonitor->ExpectSuccess();
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Offset + extent width > mip width, but still within the final compressed block - should succeed
    region.imageExtent = {4, 4, 1};
    m_errorMonitor->ExpectSuccess();
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyNotFound();

    // Offset + extent height < mip height and not a multiple of block height - should fail
    // (the width reaches the mip edge at 4 + 3 = 7, so only the height dimension violates the VUID)
    region.imageExtent = {3, 3, 1};
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00208");  // offset+extent height not a multiple of block height
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyImageToBuffer-imageOffset-01794");  // image transfer granularity
    vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkBufferImageCopy-imageExtent-00208");  // offset+extent height not a multiple of block height
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-vkCmdCopyBufferToImage-imageOffset-01793");  // image transfer granularity
    vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ImageBufferCopyTests) {
    TEST_DESCRIPTION("Image to buffer and buffer to image tests");

    ASSERT_NO_FATAL_FAILURE(Init());

    // Bail if any dimension of transfer granularity is 0.
    auto index = m_device->graphics_queue_node_index_;
    auto queue_family_properties = m_device->phy().queue_properties();
    if ((queue_family_properties[index].minImageTransferGranularity.depth == 0) ||
        (queue_family_properties[index].minImageTransferGranularity.width == 0) ||
        (queue_family_properties[index].minImageTransferGranularity.height == 0)) {
        printf("%s Subresource copies are disallowed when xfer granularity (x|y|z) is 0. 
Skipped.\n", kSkipPrefix); return; } VkImageObj image_64k(m_device); // 128^2 texels, 64k VkImageObj image_16k(m_device); // 64^2 texels, 16k VkImageObj image_16k_depth(m_device); // 64^2 texels, depth, 16k VkImageObj ds_image_4D_1S(m_device); // 256^2 texels, 512kb (256k depth, 64k stencil, 192k pack) VkImageObj ds_image_3D_1S(m_device); // 256^2 texels, 256kb (192k depth, 64k stencil) VkImageObj ds_image_2D(m_device); // 256^2 texels, 128k (128k depth) VkImageObj ds_image_1S(m_device); // 256^2 texels, 64k (64k stencil) image_64k.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_16k.Init(64, 64, 1, VK_FORMAT_R8G8B8A8_UINT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_64k.initialized()); ASSERT_TRUE(image_16k.initialized()); // Verify all needed Depth/Stencil formats are supported bool missing_ds_support = false; VkFormatProperties props = {0, 0, 0}; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D24_UNORM_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &props); missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0); missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0; missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0; if (!missing_ds_support) { image_16k_depth.Init(64, 64, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image_16k_depth.initialized()); ds_image_4D_1S.Init( 256, 256, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_4D_1S.initialized()); ds_image_3D_1S.Init( 256, 256, 1, VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_3D_1S.initialized()); ds_image_2D.Init( 256, 256, 1, VK_FORMAT_D16_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | 
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_2D.initialized()); ds_image_1S.Init( 256, 256, 1, VK_FORMAT_S8_UINT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(ds_image_1S.initialized()); } // Allocate buffers VkBufferObj buffer_256k, buffer_128k, buffer_64k, buffer_16k; VkMemoryPropertyFlags reqs = 0; buffer_256k.init_as_src_and_dst(*m_device, 262144, reqs); // 256k buffer_128k.init_as_src_and_dst(*m_device, 131072, reqs); // 128k buffer_64k.init_as_src_and_dst(*m_device, 65536, reqs); // 64k buffer_16k.init_as_src_and_dst(*m_device, 16384, reqs); // 16k VkBufferImageCopy region = {}; region.bufferRowLength = 0; region.bufferImageHeight = 0; region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; region.imageSubresource.layerCount = 1; region.imageOffset = {0, 0, 0}; region.imageExtent = {64, 64, 1}; region.bufferOffset = 0; // attempt copies before putting command buffer in recording state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // start recording m_commandBuffer->begin(); // successful copies m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); region.imageOffset.x = 16; // 16k copy, offset requires larger image vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); region.imageExtent.height = 78; // > 16k copy requires larger buffer & image vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); region.imageOffset.x = 0; region.imageExtent.height = 64; region.bufferOffset = 256; // 16k copy with buffer offset, requires larger buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); // image/buffer too small (extent too large) on copy to image region.imageExtent = {65, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // image/buffer too small (offset) on copy to image region.imageExtent = {64, 64, 1}; region.imageOffset = {0, 4, 0}; 
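// (Note: region.bufferOffset is still 256 from the successful copy above, so this 64x64 region
//  needs 256 + 64*64*4 = 16640 bytes and overflows the 16k buffer; the y-offset of 4 also pushes
//  the region past the 64-texel height of image_16k in the second call.)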
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); // image/buffer too small on copy to buffer region.imageExtent = {64, 64, 1}; region.imageOffset = {0, 0, 0}; region.bufferOffset = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // buffer too small vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent = {64, 65, 1}; region.bufferOffset = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // image too small vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // buffer size OK but rowlength causes loose packing m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); region.imageExtent = {64, 64, 1}; region.bufferRowLength = 68; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // An extent with zero area should produce a warning, but no error m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT, "} has zero area"); region.imageExtent.width = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // aspect bits region.imageExtent = {64, 64, 1}; region.bufferRowLength = 0; region.bufferImageHeight = 0; if (!missing_ds_support) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00212"); // more than 1 aspect bit set region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00211"); // different mis-matched aspect region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); } m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-aspectMask-00211"); // mis-matched aspect region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; // Out-of-range mip levels should fail region.imageSubresource.mipLevel = 
image_16k.create_info().mipLevels + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // unavoidable "region exceeds image bounds" for non-existent mip vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01701"); m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172"); // unavoidable "region exceeds image bounds" for non-existent mip vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.mipLevel = 0; // Out-of-range array layers should fail region.imageSubresource.baseArrayLayer = image_16k.create_info().arrayLayers; region.imageSubresource.layerCount = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01702"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region); m_errorMonitor->VerifyFound(); region.imageSubresource.baseArrayLayer = 0; // Layout mismatch should fail m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189"); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); // Test Depth/Stencil copies if (missing_ds_support) { printf("%s Depth / Stencil formats unsupported - skipping D/S tests.\n", kSkipPrefix); } else { VkBufferImageCopy ds_region = {}; ds_region.bufferOffset = 0; ds_region.bufferRowLength = 0; ds_region.bufferImageHeight = 0; ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; ds_region.imageSubresource.mipLevel = 0; ds_region.imageSubresource.baseArrayLayer = 0; ds_region.imageSubresource.layerCount = 1; ds_region.imageOffset = {0, 0, 0}; ds_region.imageExtent = {256, 256, 1}; // Depth copies that should succeed m_errorMonitor->ExpectSuccess(); // Extract 4b depth per texel, pack into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); // Extract 3b depth per texel, pack (loose) into 256k buffer vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region); m_errorMonitor->VerifyNotFound(); 
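// Byte sizes these depth/stencil cases rely on (our reading of the per-aspect packing; the image
// names encode bytes of depth (D) and stencil (S) per texel):
//   DEPTH aspect of VK_FORMAT_D32_SFLOAT_S8_UINT -> 4 bytes/texel: 256*256*4 = 256k
//   DEPTH aspect of VK_FORMAT_D24_UNORM_S8_UINT  -> 24 bits stored loose in 4 bytes = 256k
//   DEPTH aspect of VK_FORMAT_D16_UNORM          -> 2 bytes/texel: 256*256*2 = 128k
//   STENCIL aspect of any of the above           -> 1 byte/texel:  256*256   =  64k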
m_errorMonitor->ExpectSuccess();  // Copy 2b depth per texel, into 128k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();

// Depth copies that should fail
ds_region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Extract 4b depth per texel, pack into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Extract 3b depth per texel, pack (loose) into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Copy 2b depth per texel, into 128k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_128k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();

// Stencil copies that should succeed
ds_region.bufferOffset = 0;
ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
m_errorMonitor->ExpectSuccess();  // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();  // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();  // Copy 1b stencil per texel, into 64k buffer (ds_image_1S is stencil-only)
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();

// Stencil copies that should fail
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Extract 1b stencil per texel, pack into 64k buffer
ds_region.bufferRowLength = 260;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
ds_region.bufferRowLength = 0;
ds_region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");  // Copy 1b stencil per texel, into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
}

// Test compressed formats, if supported
VkPhysicalDeviceFeatures
device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); if (!(device_features.textureCompressionBC || device_features.textureCompressionETC2 || device_features.textureCompressionASTC_LDR)) { printf("%s No compressed formats supported - block compression tests skipped.\n", kSkipPrefix); } else { VkImageObj image_16k_4x4comp(m_device); // 128^2 texels as 32^2 compressed (4x4) blocks, 16k VkImageObj image_NPOT_4x4comp(m_device); // 130^2 texels as 33^2 compressed (4x4) blocks if (device_features.textureCompressionBC) { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } else if (device_features.textureCompressionETC2) { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } else { image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); } ASSERT_TRUE(image_16k_4x4comp.initialized()); // Just fits m_errorMonitor->ExpectSuccess(); region.imageExtent = {128, 128, 1}; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); // with offset, too big for buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183"); region.bufferOffset = 16; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.bufferOffset = 0; // extents that are not a multiple of compressed block size m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00207"); // extent width not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity region.imageExtent.width = 66; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.width = 128; m_errorMonitor->SetDesiredFailureMsg( VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-imageExtent-00208"); // extent height not a multiple of block size m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity region.imageExtent.height = 2; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); region.imageExtent.height = 128; // TODO: All available compressed formats are 2D, with block depth of 1. Unable to provoke VU_01277. 
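// A minimal sketch (illustrative only, not something the upstream checks compute here) of the
// buffer-size math behind these 4x4 block-compressed cases: a WxH region occupies
// ceil(W/4) * ceil(H/4) blocks of 16 bytes each, so a 128x128 region is 32*32*16 = 16k.
const auto compressed_4x4_region_size = [](uint32_t w, uint32_t h) {
    return static_cast<VkDeviceSize>((w + 3) / 4) * ((h + 3) / 4) * 16;
};
(void)compressed_4x4_region_size;  // e.g. compressed_4x4_region_size(128, 128) == 16384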
// non-multiple extents are allowed if at the far edge of a non-block-multiple image - these should pass m_errorMonitor->ExpectSuccess(); region.imageExtent.width = 66; region.imageOffset.x = 64; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); region.imageExtent.width = 16; region.imageOffset.x = 0; region.imageExtent.height = 2; region.imageOffset.y = 128; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyNotFound(); region.imageOffset = {0, 0, 0}; // buffer offset must be a multiple of texel block size (16) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00206"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193"); region.imageExtent = {64, 64, 1}; region.bufferOffset = 24; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // rowlength not a multiple of block width (4) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00203"); region.bufferOffset = 0; region.bufferRowLength = 130; region.bufferImageHeight = 0; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); // imageheight not a multiple of block height (4) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00204"); region.bufferRowLength = 0; region.bufferImageHeight = 130; vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, MiscImageLayerTests) { TEST_DESCRIPTION("Image-related tests that don't belong elsewhere"); ASSERT_NO_FATAL_FAILURE(Init()); // TODO: Ideally we should check if a format is supported, before using it. 
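// One possible shape of that check, sketched under the assumption that a missing format would
// be reported and skipped rather than failed (the query below is real; the skip logic is left
// commented out so test behavior is unchanged):
VkFormatProperties r16_fmt_props;
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_R16G16B16A16_UINT, &r16_fmt_props);
// if (!(r16_fmt_props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT)) {
//     printf("%s VK_FORMAT_R16G16B16A16_UINT not usable as a transfer target; skipped.\n", kSkipPrefix);
//     return;
// }
(void)r16_fmt_props;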
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);  // 64bpp
ASSERT_TRUE(image.initialized());
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_src(*m_device, 128 * 128 * 8, reqs);
VkBufferImageCopy region = {};
region.bufferRowLength = 128;
region.bufferImageHeight = 128;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT
region.imageSubresource.layerCount = 1;
region.imageExtent.height = 4;
region.imageExtent.width = 4;
region.imageExtent.depth = 1;

VkImageObj image2(m_device);
image2.Init(128, 128, 1, VK_FORMAT_R8G8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);  // 16bpp
ASSERT_TRUE(image2.initialized());
VkBufferObj buffer2;
VkMemoryPropertyFlags reqs2 = 0;
buffer2.init_as_src(*m_device, 128 * 128 * 2, reqs2);
VkBufferImageCopy region2 = {};
region2.bufferRowLength = 128;
region2.bufferImageHeight = 128;
region2.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// layerCount can't be 0 - Expect MISMATCHED_IMAGE_ASPECT
region2.imageSubresource.layerCount = 1;
region2.imageExtent.height = 4;
region2.imageExtent.width = 4;
region2.imageExtent.depth = 1;

m_commandBuffer->begin();

// Image must have offset.z of 0 and extent.depth of 1
// Introduce failure by setting imageExtent.depth to 0
region.imageExtent.depth = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
m_errorMonitor->VerifyFound();

region.imageExtent.depth = 1;
// Image must have offset.z of 0 and extent.depth of 1
// Introduce failure by setting imageOffset.z to 4
// Note: Also (unavoidably) triggers 'region exceeds image' #1228
region.imageOffset.z = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
m_errorMonitor->VerifyFound();

region.imageOffset.z = 0;
// BufferOffset must be a multiple of the calling command's VkImage parameter's texel size
// Introduce failure by setting bufferOffset to half a texel (4 bytes against an 8-byte texel)
region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
m_errorMonitor->VerifyFound();

// BufferOffset must be a multiple of 4
// Introduce failure by setting bufferOffset to a value not divisible by 4
region2.bufferOffset = 6;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00194");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer2.handle(), image2.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region2);
m_errorMonitor->VerifyFound();

// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
region.bufferOffset = 0;
region.imageExtent.height = 128;
region.imageExtent.width = 128;
// Introduce failure by setting bufferRowLength > 0 but less than width
region.bufferRowLength = 64;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00195"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); // BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent region.bufferRowLength = 128; // Introduce failure by setting bufferRowHeight > 0 but less than height region.bufferImageHeight = 64; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00196"); vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region); m_errorMonitor->VerifyFound(); region.bufferImageHeight = 128; VkImageObj intImage1(m_device); intImage1.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0); intImage1.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageObj intImage2(m_device); intImage2.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); intImage2.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL); VkImageBlit blitRegion = {}; blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.srcSubresource.baseArrayLayer = 0; blitRegion.srcSubresource.layerCount = 1; blitRegion.srcSubresource.mipLevel = 0; blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; blitRegion.dstSubresource.baseArrayLayer = 0; blitRegion.dstSubresource.layerCount = 1; blitRegion.dstSubresource.mipLevel = 0; blitRegion.srcOffsets[0] = {128, 0, 0}; blitRegion.srcOffsets[1] = {128, 128, 1}; blitRegion.dstOffsets[0] = {0, 128, 0}; blitRegion.dstOffsets[1] = {128, 128, 1}; // Look for NULL-blit warning m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdBlitImage: pRegions[0].srcOffsets specify a zero-volume area."); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdBlitImage: pRegions[0].dstOffsets specify a zero-volume area."); vkCmdBlitImage(m_commandBuffer->handle(), intImage1.handle(), intImage1.Layout(), intImage2.handle(), intImage2.Layout(), 1, &blitRegion, VK_FILTER_LINEAR); m_errorMonitor->VerifyFound(); } VkResult GPDIFPHelper(VkPhysicalDevice dev, const VkImageCreateInfo *ci, VkImageFormatProperties *limits = nullptr) { VkImageFormatProperties tmp_limits; limits = limits ? 
limits : &tmp_limits; return vkGetPhysicalDeviceImageFormatProperties(dev, ci->format, ci->imageType, ci->tiling, ci->usage, ci->flags, limits); } TEST_F(VkLayerTest, CreateImageMiscErrors) { TEST_DESCRIPTION("Misc leftover valid usage errors in VkImageCreateInfo struct"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {64, 64, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 2; image_ci.pQueueFamilyIndices = nullptr; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00941"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT; image_ci.queueFamilyIndexCount = 1; const uint32_t queue_family = 0; image_ci.pQueueFamilyIndices = &queue_family; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00942"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.format = VK_FORMAT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; image_ci.imageType = VK_IMAGE_TYPE_1D; image_ci.extent = {64, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.imageType = VK_IMAGE_TYPE_3D; image_ci.extent = {4, 4, 4}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962"); 
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.tiling = VK_IMAGE_TILING_LINEAR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support image_ci.samples = VK_SAMPLE_COUNT_4_BIT; image_ci.mipLevels = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT; image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00969"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } // InitialLayout not VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREDEFINED { VkImageCreateInfo image_ci = safe_image_ci; image_ci.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-initialLayout-00993"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, CreateImageMinLimitsViolation) { TEST_DESCRIPTION("Create invalid image with invalid parameters violation minimum limit, such as being zero."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; 
// any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; enum Dimension { kWidth = 0x1, kHeight = 0x2, kDepth = 0x4 }; for (underlying_type<Dimension>::type bad_dimensions = 0x1; bad_dimensions < 0x8; ++bad_dimensions) { VkExtent3D extent = {1, 1, 1}; if (bad_dimensions & kWidth) { extent.width = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00944"); } if (bad_dimensions & kHeight) { extent.height = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00945"); } if (bad_dimensions & kDepth) { extent.depth = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00946"); } VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_3D; // has to be 3D otherwise it might trigger the non-1 error instead bad_image_ci.extent = extent; vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.mipLevels = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00947"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.arrayLayers = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00948"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; bad_image_ci.arrayLayers = 5; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.arrayLayers = 6; bad_image_ci.extent = {64, 63, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_1D; bad_image_ci.extent = {64, 2, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_1D; bad_image_ci.extent = {64, 1, 2}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_2D; bad_image_ci.extent = {64, 64, 2}; 
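// 2D images require extent.depth == 1; a depth of 2 should trip VUID-VkImageCreateInfo-imageType-00957 below.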
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); bad_image_ci.imageType = VK_IMAGE_TYPE_2D; bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; bad_image_ci.arrayLayers = 6; bad_image_ci.extent = {64, 64, 2}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo bad_image_ci = safe_image_ci; bad_image_ci.imageType = VK_IMAGE_TYPE_3D; bad_image_ci.arrayLayers = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00961"); vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } } VkFormat FindFormatLinearWithoutMips(VkPhysicalDevice gpu, VkImageCreateInfo image_ci) { image_ci.tiling = VK_IMAGE_TILING_LINEAR; const VkFormat first_vk_format = static_cast<VkFormat>(1); const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184 for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) { image_ci.format = format; // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; VkImageFormatProperties img_limits; if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && img_limits.maxMipLevels == 1) return format; } return VK_FORMAT_UNDEFINED; } bool FindFormatWithoutSamples(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) { const VkFormat first_vk_format = static_cast<VkFormat>(1); const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184 for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) { image_ci.format = format; // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet VkFormatProperties format_props; vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props); const VkFormatFeatureFlags core_filter = 0x1FFF; const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? 
format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; if (!(features & core_filter)) continue; for (VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_64_BIT; samples > 0; samples = static_cast<VkSampleCountFlagBits>(samples >> 1)) { image_ci.samples = samples; VkImageFormatProperties img_limits; if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && !(img_limits.sampleCounts & samples)) return true; } } return false; } TEST_F(VkLayerTest, CreateImageMaxLimitsViolation) { TEST_DESCRIPTION("Create invalid image with invalid parameters exceeding physical device limits."); ASSERT_NO_FATAL_FAILURE(Init()); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; { VkImageCreateInfo image_ci = safe_image_ci; image_ci.imageType = VK_IMAGE_TYPE_1D; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension1D); if (img_limits.maxExtent.width != UINT32_MAX) { image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00951"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 1D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.imageType = VK_IMAGE_TYPE_2D; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension2D); img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimension2D); if (img_limits.maxExtent.width != UINT32_MAX) { image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 2D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } if (img_limits.maxExtent.height != UINT32_MAX) { image_ci.extent = {1, img_limits.maxExtent.height + 1, 1}; 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 2D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; image_ci.arrayLayers = 6; image_ci.imageType = VK_IMAGE_TYPE_2D; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimensionCube); img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimensionCube); if (img_limits.maxExtent.width != UINT32_MAX || img_limits.maxExtent.height != UINT32_MAX) { image_ci.extent = {img_limits.maxExtent.width + 1, img_limits.maxExtent.height + 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00953"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s CUBE VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.imageType = VK_IMAGE_TYPE_3D; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); // WORKAROUND for dev_sim and mock_icd not containing valid format limits yet img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension3D); img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimension3D); img_limits.maxExtent.depth = std::max(img_limits.maxExtent.depth, dev_limits.maxImageDimension3D); if (img_limits.maxExtent.width != UINT32_MAX) { image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } if (img_limits.maxExtent.height != UINT32_MAX) { image_ci.extent = {1, img_limits.maxExtent.height + 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } if (img_limits.maxExtent.depth != UINT32_MAX) { image_ci.extent = {1, 1, img_limits.maxExtent.depth + 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.extent = {8, 8, 1}; image_ci.mipLevels = 4 + 1; // 4 = log2(8) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, 
NULL, &null_image); m_errorMonitor->VerifyFound(); image_ci.extent = {8, 15, 1}; image_ci.mipLevels = 4 + 1; // 4 = floor(log2(15)) + 1 m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.tiling = VK_IMAGE_TILING_LINEAR; image_ci.extent = {64, 64, 1}; image_ci.format = FindFormatLinearWithoutMips(gpu(), image_ci); image_ci.mipLevels = 2; if (image_ci.format != VK_FORMAT_UNDEFINED) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00959"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Cannot find a format to test maxMipLevels limit; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); if (img_limits.maxArrayLayers != UINT32_MAX) { image_ci.arrayLayers = img_limits.maxArrayLayers + 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00960"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkImageFormatProperties::maxArrayLayers is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; bool found = FindFormatWithoutSamples(gpu(), image_ci); if (found) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00967"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Could not find a format with some unsupported samples; skipping part of test.\n", kSkipPrefix); } } { VkImageCreateInfo image_ci = safe_image_ci; image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // (any attachment bit) VkImageFormatProperties img_limits; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits)); if (dev_limits.maxFramebufferWidth != UINT32_MAX) { image_ci.extent = {dev_limits.maxFramebufferWidth + 1, 64, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00964"); if (image_ci.extent.width > img_limits.maxExtent.width) { // might also trip image limits VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952"); } vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkPhysicalDeviceLimits::maxFramebufferWidth is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); } if (dev_limits.maxFramebufferHeight != UINT32_MAX) { image_ci.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; // try different one too image_ci.extent = {64, dev_limits.maxFramebufferHeight + 1, 1}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00965"); if (image_ci.extent.height > img_limits.maxExtent.height) { // might also trip image limits VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952"); } vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s VkPhysicalDeviceLimits::maxFramebufferHeight is already UINT32_MAX; skipping part of test.\n", kSkipPrefix); 
    }
}
}

bool FindUnsupportedImage(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) {
    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    const std::vector<VkImageTiling> tilings = {VK_IMAGE_TILING_LINEAR, VK_IMAGE_TILING_OPTIMAL};
    for (const auto tiling : tilings) {
        image_ci.tiling = tiling;
        for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
            image_ci.format = format;

            VkFormatProperties format_props;
            vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);

            const VkFormatFeatureFlags core_filter = 0x1FFF;
            const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
                                                                     : format_props.optimalTilingFeatures & core_filter;
            if (!(features & core_filter)) continue;

            // We want a format supported by format features, but not by ImageFormatProperties
            // get as many usage flags as possible
            image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
            if (features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
            if (features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
            if (features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
            if (features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;

            VkImageFormatProperties img_limits;
            if (VK_ERROR_FORMAT_NOT_SUPPORTED == GPDIFPHelper(gpu, &image_ci, &img_limits)) {
                return true;
            }
        }
    }

    return false;
}

VkFormat FindFormatWithoutFeatures(VkPhysicalDevice gpu, VkImageTiling tiling,
                                   VkFormatFeatureFlags undesired_features = UINT32_MAX) {
    const VkFormat first_vk_format = static_cast<VkFormat>(1);
    const VkFormat last_vk_format = static_cast<VkFormat>(130);  // avoid compressed/feature protected, otherwise 184

    for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
        VkFormatProperties format_props;
        vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
        const VkFormatFeatureFlags core_filter = 0x1FFF;
        const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ?
format_props.linearTilingFeatures & core_filter : format_props.optimalTilingFeatures & core_filter; const auto valid_features = features & core_filter; if (undesired_features == UINT32_MAX) { if (!valid_features) return format; } else { if (valid_features && !(valid_features & undesired_features)) return format; } } return VK_FORMAT_UNDEFINED; } TEST_F(VkLayerTest, CreateImageFormatSupportErrors) { TEST_DESCRIPTION("Valid usage errors of format support in VkImageCreateInfo struct"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); VkImage null_image; // throwaway target for all the vkCreateImage VkImageCreateInfo tmp_img_ci = {}; tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; tmp_img_ci.flags = 0; // assumably any is supported tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096 tmp_img_ci.mipLevels = 1; // any is supported tmp_img_ci.arrayLayers = 1; // limit is 256 tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR // if VK_IMAGE_TILING_LINEAR imageType must be 2D, usage must be TRANSFER, and levels layers samplers all 1 tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL; tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; const VkImageCreateInfo safe_image_ci = tmp_img_ci; ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci)); { VkImageCreateInfo image_ci = safe_image_ci; bool found = FindUnsupportedImage(gpu(), image_ci); if (found) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00940"); vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image); m_errorMonitor->VerifyFound(); } else { printf("%s Failed to find image unsupported by vkGetPhysicalDeviceImageFormatProperties; skipping test.\n", kSkipPrefix); } } } TEST_F(VkLayerTest, CopyImageTypeExtentMismatch) { // Image copy tests where format type and extents don't match ASSERT_NO_FATAL_FAILURE(Init()); VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Create 1D image VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); // 2D image ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); // 3D image ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {32, 32, 8}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); // 2D image array ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; ci.arrayLayers = 8; VkImageObj image_2D_array(m_device); image_2D_array.init(&ci); ASSERT_TRUE(image_2D_array.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 1, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel 
= 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // 1D texture w/ offset.y > 0. Source = VU 09c00124, dest = 09c00130 copy_region.srcOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.y = 0; copy_region.dstOffset.y = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.y = 0; // 1D texture w/ extent.height > 1. Source = VU 09c00124, dest = 09c00130 copy_region.extent.height = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.extent.height = 1; // 1D texture w/ offset.z > 0. Source = VU 09c00df2, dest = 09c00df4 copy_region.srcOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.z = 0; copy_region.dstOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.z = 0; // 1D texture w/ extent.depth > 1. 
Source = VU 09c00df2, dest = 09c00df4 copy_region.extent.depth = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.extent.depth = 1; // 2D texture w/ offset.z > 0. Source = VU 09c00df6, dest = 09c00df8 copy_region.extent = {16, 16, 1}; copy_region.srcOffset.z = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01787"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src) m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset.z = 0; copy_region.dstOffset.z = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01788"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst) m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset.z = 0; // 3D texture accessing an array layer other than 0. VU 09c0011a copy_region.extent = {4, 4, 1}; copy_region.srcSubresource.baseArrayLayer = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00141"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698"); // also 'too many layers' m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageTypeExtentMismatchMaintenance1) { // Image copy tests where format type and extents don't match and the Maintenance1 extension is enabled ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s Maintenance1 extension cannot be enabled, test skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkFormat image_format = VK_FORMAT_R8G8B8A8_UNORM; VkFormatProperties format_props; // TODO: Remove this check if or when devsim handles extensions. // The chosen format has mandatory support for the transfer src and dst format features when Maintenance1 is enabled.
However, our use of devsim and the mock ICD violates this guarantee. vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_format, &format_props); if (!(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) { printf("%s Format does not support the required transfer features; skipping test.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_1D; ci.format = image_format; ci.extent = {32, 1, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Create 1D image VkImageObj image_1D(m_device); image_1D.init(&ci); ASSERT_TRUE(image_1D.initialized()); // 2D image ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; VkImageObj image_2D(m_device); image_2D.init(&ci); ASSERT_TRUE(image_2D.initialized()); // 3D image ci.imageType = VK_IMAGE_TYPE_3D; ci.extent = {32, 32, 8}; VkImageObj image_3D(m_device); image_3D.init(&ci); ASSERT_TRUE(image_3D.initialized()); // 2D image array ci.imageType = VK_IMAGE_TYPE_2D; ci.extent = {32, 32, 1}; ci.arrayLayers = 8; VkImageObj image_2D_array(m_device); image_2D_array.init(&ci); ASSERT_TRUE(image_2D_array.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 1, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Copy from layer not present copy_region.srcSubresource.baseArrayLayer = 4; copy_region.srcSubresource.layerCount = 6; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698"); m_commandBuffer->CopyImage(image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; // Copy to layer not present copy_region.dstSubresource.baseArrayLayer = 1; copy_region.dstSubresource.layerCount = 8; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstSubresource-01699"); m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.layerCount = 1; m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageCompressedBlockAlignment) { // Image copy tests on compressed images with block alignment errors ASSERT_NO_FATAL_FAILURE(Init()); // Select a compressed format and verify support VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); VkFormat compressed_format = VK_FORMAT_UNDEFINED; if (device_features.textureCompressionBC) { compressed_format = VK_FORMAT_BC3_SRGB_BLOCK; } else if (device_features.textureCompressionETC2) { compressed_format =
VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK; } else if (device_features.textureCompressionASTC_LDR) { compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = compressed_format; ci.extent = {64, 64, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageFormatProperties img_prop = {}; if (VK_SUCCESS != vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), ci.format, ci.imageType, ci.tiling, ci.usage, ci.flags, &img_prop)) { printf("%s No compressed formats supported - CopyImageCompressedBlockAlignment skipped.\n", kSkipPrefix); return; } // Create images VkImageObj image_1(m_device); image_1.init(&ci); ASSERT_TRUE(image_1.initialized()); ci.extent = {62, 62, 1}; // slightly smaller and not divisible by block size VkImageObj image_2(m_device); image_2.init(&ci); ASSERT_TRUE(image_2.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Sanity check m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); std::string vuid; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (m_device->props.apiVersion >= VK_API_VERSION_1_1)); // Src, Dest offsets must be multiples of compressed block sizes {4, 4, 1} // Image transfer granularity gets set to compressed block size, so an ITG error is also (unavoidably) triggered. vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157"; copy_region.srcOffset = {2, 4, 0}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {12, 1, 0}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; vuid = ycbcr ? 
"VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162"; copy_region.dstOffset = {1, 0, 0}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {4, 1, 0}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes {4, 4, 1} if not full width/height vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158"; copy_region.extent = {62, 60, 1}; // source width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159"; copy_region.extent = {60, 62, 1}; // source height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163"; copy_region.extent = {62, 60, 1}; // dest width m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164"; copy_region.extent = {60, 62, 1}; // dest height m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Note: "VUID-VkImageCopy-extent-00160", "VUID-VkImageCopy-extent-00165", "VUID-VkImageCopy-srcImage-01730", // "VUID-VkImageCopy-dstImage-01734" // There are currently no supported compressed formats with a block depth other than 1, // so impossible to create a 'not a multiple' condition for depth. 
m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageSinglePlane422Alignment) { // Image copy tests on single-plane _422 formats with block alignment errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select a _422 format and verify support VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8B8G8R8_422_UNORM_KHR; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Single-plane _422 image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images ci.extent = {64, 64, 1}; VkImageObj image_422(m_device); image_422.init(&ci); ASSERT_TRUE(image_422.initialized()); ci.extent = {64, 64, 1}; ci.format = VK_FORMAT_R8G8B8A8_UNORM; VkImageObj image_ucmp(m_device); image_ucmp.init(&ci); ASSERT_TRUE(image_ucmp.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {48, 48, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; // Src offsets must be multiples of compressed block sizes copy_region.srcOffset = {3, 4, 0}; // source offset x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01727"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcOffset = {0, 0, 0}; // Dst offsets must be multiples of compressed block sizes copy_region.dstOffset = {1, 0, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01731"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; // Copy extent must be multiples of compressed block sizes if not full width/height copy_region.extent = {31, 60, 1}; // 422 source, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01728"); m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // 422 dest, extent.x m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01732"); m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstOffset = {0, 0, 0}; m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageMultiplaneAspectBits) { // Image copy tests on multiplane images with aspect errors // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { 
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Select multi-plane formats and verify support VkFormat mp3_format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; VkFormat mp2_format = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR; VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = mp2_format; ci.extent = {256, 256, 1}; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify formats VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); ci.format = mp3_format; supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image formats not supported. Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } // Create images VkImageObj mp3_image(m_device); mp3_image.init(&ci); ASSERT_TRUE(mp3_image.initialized()); ci.format = mp2_format; VkImageObj mp2_image(m_device); mp2_image.init(&ci); ASSERT_TRUE(mp2_image.initialized()); ci.format = VK_FORMAT_D24_UNORM_S8_UINT; VkImageObj sp_image(m_device); sp_image.init(&ci); ASSERT_TRUE(sp_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {128, 128, 1}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01552"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01553"); m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01554"); 
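// (Assumed rule behind VUIDs 01552-01555, noted for clarity: a two-plane format
// exposes only PLANE_0 and PLANE_1 aspects and a three-plane format only
// PLANE_0/1/2; COLOR is never a valid aspect for a multi-planar copy. Here the
// failing case is PLANE_2 addressed on the two-plane mp2_image.)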
m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01555"); m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01556"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01557"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also m_commandBuffer->CopyImage(sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageSrcSizeExceeded) { // Image copy with source region specified greater than src image size ASSERT_NO_FATAL_FAILURE(Init()); // Create images with full mip chain VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 32, 8}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj src_image(m_device); src_image.init(&ci); ASSERT_TRUE(src_image.initialized()); // Dest image with one more mip level ci.extent = {64, 64, 16}; ci.mipLevels = 7; ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; VkImageObj dst_image(m_device); dst_image.init(&ci); ASSERT_TRUE(dst_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 32, 8}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // Source exceeded in x-dim, VU 01202 copy_region.srcOffset.x = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); // General "contained within" VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00144"); 
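// A minimal sketch, for illustration only, of the per-dimension containment rule
// behind the VUIDs expected in this test (local lambda, deliberately unused;
// mip_extent follows the standard mip rounding max(1, dim >> mipLevel)):
const auto region_fits = [](int32_t offset, uint32_t extent, uint32_t mip_extent) {
    return (offset >= 0) && (static_cast<uint32_t>(offset) + extent <= mip_extent);
};
(void)region_fits;  // e.g. region_fits(4, 32, 32) is false: srcOffset.x = 4 plus width 32 overruns the 32-wide level 0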
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Source exceeded in y-dim, VU 01203 copy_region.srcOffset.x = 0; copy_region.extent.height = 48; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Source exceeded in z-dim, VU 01204 copy_region.extent = {4, 4, 4}; copy_region.srcSubresource.mipLevel = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageDstSizeExceeded) { // Image copy with dest region specified greater than dest image size ASSERT_NO_FATAL_FAILURE(Init()); // Create images with full mip chain VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_3D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {32, 32, 8}; ci.mipLevels = 6; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj dst_image(m_device); dst_image.init(&ci); ASSERT_TRUE(dst_image.initialized()); // Src image with one more mip level ci.extent = {64, 64, 16}; ci.mipLevels = 7; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; VkImageObj src_image(m_device); src_image.init(&ci); ASSERT_TRUE(src_image.initialized()); m_commandBuffer->begin(); VkImageCopy copy_region; copy_region.extent = {32, 32, 8}; copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copy_region.srcSubresource.mipLevel = 0; copy_region.dstSubresource.mipLevel = 0; copy_region.srcSubresource.baseArrayLayer = 0; copy_region.dstSubresource.baseArrayLayer = 0; copy_region.srcSubresource.layerCount = 1; copy_region.dstSubresource.layerCount = 1; copy_region.srcOffset = {0, 0, 0}; copy_region.dstOffset = {0, 0, 0}; m_errorMonitor->ExpectSuccess(); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); // Dest exceeded in x-dim, VU 01205 copy_region.dstOffset.x = 4; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123"); // General "contained within" VU m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00150"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Dest exceeded in y-dim, VU 01206 copy_region.dstOffset.x = 0; copy_region.extent.height = 48; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdCopyImage-pRegions-00123"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); // Dest exceeded in z-dim, VU 01207 copy_region.extent = {4, 4, 4}; copy_region.dstSubresource.mipLevel = 2; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageFormatSizeMismatch) { VkResult err; bool pass; // Create color images with different format sizes and try to copy between them m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135"); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_LINEAR; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; // Introduce failure by creating second image with a different-sized format. 
image_create_info.format = VK_FORMAT_R5G5B5A1_UNORM_PACK16; VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_create_info.format, &properties); if (properties.optimalTilingFeatures == 0) { vkDestroyImage(m_device->device(), srcImage, NULL); printf("%s Image format not supported; skipped.\n", kSkipPrefix); return; } err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = 1; copyRegion.extent.height = 1; copyRegion.extent.depth = 1; m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, CopyImageDepthStencilFormatMismatch) { ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s Couldn't find a supported depth stencil image format.\n", kSkipPrefix); return; } VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties); if (properties.optimalTilingFeatures == 0) { printf("%s Image format not supported; skipped.\n", kSkipPrefix); return; } VkImageObj srcImage(m_device); srcImage.Init(32, 32, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(dstImage.initialized()); // Try to copy between the two depth/stencil images, whose formats differ m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset.x = 0; copyRegion.srcOffset.y = 0; copyRegion.srcOffset.z = 0; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset.x = 0; copyRegion.dstOffset.y = 0; copyRegion.dstOffset.z = 0; copyRegion.extent.width = 1; copyRegion.extent.height = 1; copyRegion.extent.depth = 1; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdCopyImage called with unmatched source and dest image depth"); m_commandBuffer->CopyImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CopyImageSampleCountMismatch) { TEST_DESCRIPTION("Image copies with sample count mis-matches"); ASSERT_NO_FATAL_FAILURE(Init()); VkImageFormatProperties image_format_properties; vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0, &image_format_properties); if ((0 == (VK_SAMPLE_COUNT_2_BIT & image_format_properties.sampleCounts)) || (0 == (VK_SAMPLE_COUNT_4_BIT & image_format_properties.sampleCounts))) { printf("%s Image multi-sample support not found; skipped.\n", kSkipPrefix); return; } VkImageCreateInfo ci; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_R8G8B8A8_UNORM; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.queueFamilyIndexCount = 0; ci.pQueueFamilyIndices = NULL; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VkImageObj image1(m_device); image1.init(&ci); ASSERT_TRUE(image1.initialized()); ci.samples = VK_SAMPLE_COUNT_2_BIT; VkImageObj image2(m_device); image2.init(&ci); ASSERT_TRUE(image2.initialized()); ci.samples = VK_SAMPLE_COUNT_4_BIT; VkImageObj image4(m_device); image4.init(&ci); ASSERT_TRUE(image4.initialized()); m_commandBuffer->begin(); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {0, 0, 0}; copyRegion.extent = {128, 128, 1}; // Copy a single sample image to/from a multi-sample image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image1.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image1.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); 
m_errorMonitor->VerifyFound(); // Copy between multi-sample images with different sample counts m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136"); vkCmdCopyImage(m_commandBuffer->handle(), image4.handle(), VK_IMAGE_LAYOUT_GENERAL, image2.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, CopyImageAspectMismatch) { TEST_DESCRIPTION("Image copies with aspect mask errors"); ASSERT_NO_FATAL_FAILURE(Init()); auto ds_format = FindSupportedDepthStencilFormat(gpu()); if (!ds_format) { printf("%s Couldn't find depth stencil format.\n", kSkipPrefix); return; } VkFormatProperties properties; vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties); if (properties.optimalTilingFeatures == 0) { printf("%s Image format VK_FORMAT_D32_SFLOAT not supported; skipped.\n", kSkipPrefix); return; } VkImageObj color_image(m_device), ds_image(m_device), depth_image(m_device); color_image.Init(128, 128, 1, VK_FORMAT_R32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT); depth_image.Init(128, 128, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ds_image.Init(128, 128, 1, ds_format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(color_image.initialized()); ASSERT_TRUE(depth_image.initialized()); ASSERT_TRUE(ds_image.initialized()); VkImageCopy copyRegion; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {64, 0, 0}; copyRegion.extent = {64, 128, 1}; // Submitting command before command buffer is in recording state m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "You must call vkBeginCommandBuffer"); // "VUID-vkCmdCopyImage-commandBuffer-recording"); vkCmdCopyImage(m_commandBuffer->handle(), depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->begin(); // Src and dest aspect masks don't match copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) || (m_device->props.apiVersion >= VK_API_VERSION_1_1)); std::string vuid = (ycbcr ? 
"VUID-VkImageCopy-srcImage-01551" : "VUID-VkImageCopy-aspectMask-00137"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Illegal combinations of aspect bits copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Metadata aspect is illegal copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // same test for dstSubresource copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168"); // These aspect/format mismatches are redundant but unavoidable here m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; // Aspect mask doesn't match source image format m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"unmatched source and dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); // Aspect mask doesn't match dest image format copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143"); // Again redundant but unavoidable m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and dest image depth/stencil formats"); vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, ResolveImageLowSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with source sample count less than 2."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 1 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageHighSampleCount) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdResolveImage called with dest sample count greater than 1."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of sample count 4 and try to Resolve between them VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; 
image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_4_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; VkImageObj srcImage(m_device); srcImage.init(&image_create_info); ASSERT_TRUE(srcImage.initialized()); VkImageObj dstImage(m_device); dstImage.init(&image_create_info); ASSERT_TRUE(dstImage.initialized()); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ResolveImageFormatMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest formats."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); // Set format to something other than source image image_create_info.format = VK_FORMAT_R32_SFLOAT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; 
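// (Note on the allocation pattern below, included as a clarifying assumption:
// set_memory_type is a test-framework helper that picks a memoryTypeIndex whose
// bit is set in memReqs.memoryTypeBits and which satisfies the requested property
// flags -- 0 here, so any compatible memory type is accepted.)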
memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageTypeMismatch) { VkResult err; bool pass; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "vkCmdResolveImage called with unmatched source and dest image types."); ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImage srcImage; VkImage dstImage; VkDeviceMemory srcMem; VkDeviceMemory destMem; VkMemoryRequirements memReqs; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 1; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.flags = 0; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage); ASSERT_VK_SUCCESS(err); image_create_info.imageType = VK_IMAGE_TYPE_1D; // Note: Some implementations expect color 
attachment usage for any // multisample surface image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage); ASSERT_VK_SUCCESS(err); // Allocate memory VkMemoryAllocateInfo memAlloc = {}; memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memAlloc.pNext = NULL; memAlloc.allocationSize = 0; memAlloc.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs); memAlloc.allocationSize = memReqs.size; pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0); ASSERT_VK_SUCCESS(err); err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); // Need memory barrier to VK_IMAGE_LAYOUT_GENERAL for source and dest? // VK_IMAGE_LAYOUT_UNDEFINED = 0, // VK_IMAGE_LAYOUT_GENERAL = 1, VkImageResolve resolveRegion; resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.srcSubresource.mipLevel = 0; resolveRegion.srcSubresource.baseArrayLayer = 0; resolveRegion.srcSubresource.layerCount = 1; resolveRegion.srcOffset.x = 0; resolveRegion.srcOffset.y = 0; resolveRegion.srcOffset.z = 0; resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; resolveRegion.dstSubresource.mipLevel = 0; resolveRegion.dstSubresource.baseArrayLayer = 0; resolveRegion.dstSubresource.layerCount = 1; resolveRegion.dstOffset.x = 0; resolveRegion.dstOffset.y = 0; resolveRegion.dstOffset.z = 0; resolveRegion.extent.width = 1; resolveRegion.extent.height = 1; resolveRegion.extent.depth = 1; m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); vkDestroyImage(m_device->device(), srcImage, NULL); vkDestroyImage(m_device->device(), dstImage, NULL); vkFreeMemory(m_device->device(), srcMem, NULL); vkFreeMemory(m_device->device(), destMem, NULL); } TEST_F(VkLayerTest, ResolveImageLayoutMismatch) { ASSERT_NO_FATAL_FAILURE(Init()); // Create two images of different types and try to copy between them VkImageObj srcImage(m_device); VkImageObj dstImage(m_device); VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 32; image_create_info.extent.height = 32; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_2_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // Note: Some implementations expect color attachment usage for any // multisample surface image_create_info.flags = 0; 
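// (Assumed context for the layout-mismatch cases below: per
// VUID-vkCmdResolveImage-srcImageLayout-00260 and -dstImageLayout-00262, the layout
// parameters passed to the resolve must match the layout the subresources are
// actually in when the command executes. The test transitions the images to
// TRANSFER_SRC_OPTIMAL / TRANSFER_DST_OPTIMAL and then deliberately passes
// VK_IMAGE_LAYOUT_GENERAL for one side at a time.)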
    srcImage.init(&image_create_info);
    ASSERT_TRUE(srcImage.initialized());

    // Note: Some implementations expect color attachment usage for any
    // multisample surface
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    dstImage.init(&image_create_info);
    ASSERT_TRUE(dstImage.initialized());

    m_commandBuffer->begin();

    // source image must have valid contents before resolve
    VkClearColorValue clear_color = {{0, 0, 0, 0}};
    VkImageSubresourceRange subresource = {};
    subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subresource.layerCount = 1;
    subresource.levelCount = 1;
    srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource);
    srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    VkImageResolve resolveRegion;
    resolveRegion.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.srcOffset = {0, 0, 0};
    resolveRegion.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.dstOffset = {0, 0, 0};
    resolveRegion.extent = {1, 1, 1};

    // source image layout mismatch
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcImageLayout-00260");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_GENERAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
                                  &resolveRegion);
    m_errorMonitor->VerifyFound();
    // dst image layout mismatch
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstImageLayout-00262");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
                                  &resolveRegion);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, ResolveInvalidSubresource) {
    ASSERT_NO_FATAL_FAILURE(Init());

    // Create two images that differ only in sample count and resolve between them
    VkImageObj srcImage(m_device);
    VkImageObj dstImage(m_device);

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 32;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    // Note: Some implementations expect color attachment usage for any
    // multisample surface
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    image_create_info.flags = 0;

    srcImage.init(&image_create_info);
    ASSERT_TRUE(srcImage.initialized());

    // Note: Some implementations expect color attachment usage for any
    // multisample surface
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    dstImage.init(&image_create_info);
    ASSERT_TRUE(dstImage.initialized());

    m_commandBuffer->begin();

    // source image must have valid contents before resolve
    VkClearColorValue clear_color = {{0, 0, 0, 0}};
    VkImageSubresourceRange subresource = {};
    subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    subresource.layerCount = 1;
    subresource.levelCount = 1;
    srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource);
    srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);

    VkImageResolve resolveRegion;
    resolveRegion.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.srcOffset = {0, 0, 0};
    resolveRegion.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
    resolveRegion.dstOffset = {0, 0, 0};
    resolveRegion.extent = {1, 1, 1};

    // invalid source mip level
    resolveRegion.srcSubresource.mipLevel = image_create_info.mipLevels;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01709");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
    m_errorMonitor->VerifyFound();
    resolveRegion.srcSubresource.mipLevel = 0;

    // invalid dest mip level
    resolveRegion.dstSubresource.mipLevel = image_create_info.mipLevels;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01710");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
    m_errorMonitor->VerifyFound();
    resolveRegion.dstSubresource.mipLevel = 0;

    // invalid source array layer range
    resolveRegion.srcSubresource.baseArrayLayer = image_create_info.arrayLayers;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01711");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
    m_errorMonitor->VerifyFound();
    resolveRegion.srcSubresource.baseArrayLayer = 0;

    // invalid dest array layer range
    resolveRegion.dstSubresource.baseArrayLayer = image_create_info.arrayLayers;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01712");
    m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
    m_errorMonitor->VerifyFound();
    resolveRegion.dstSubresource.baseArrayLayer = 0;

    m_commandBuffer->end();
}
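
// A possible cleanup (sketch only; MakeDefaultResolveRegion is a hypothetical
// helper, not something this suite defines): the resolve tests above all build
// the same one-texel, single-layer VkImageResolve by hand, which could be shared:
//
//     static VkImageResolve MakeDefaultResolveRegion() {
//         VkImageResolve region = {};  // offsets stay {0, 0, 0} from the zero-init
//         region.srcSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//         region.dstSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
//         region.extent = {1, 1, 1};
//         return region;
//     }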

TEST_F(VkLayerTest, DepthStencilImageViewWithColorAspectBitError) {
    // Create a single Image descriptor and cause it to first hit an error due
    // to using a DS format, then cause it to hit error due to COLOR_BIT not
    // set in aspect
    // The image format check comes 2nd in validation so we trigger it first,
    // then when we cause aspect fail next, bad format check will be preempted
    VkResult err;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "Combination depth/stencil image formats can have only the ");

    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
        return;
    }

    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});

    VkDescriptorSet descriptorSet;
    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = 1;
    alloc_info.descriptorPool = ds_pool;
    alloc_info.pSetLayouts = &ds_layout.handle();
    err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
    ASSERT_VK_SUCCESS(err);

    VkImage image_bad;
    VkImage image_good;
    // One bad format and one good format for Color attachment
    const VkFormat tex_format_bad = depth_format;
    const VkFormat tex_format_good = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t tex_width = 32;
    const int32_t tex_height = 32;

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = tex_format_bad;
    image_create_info.extent.width = tex_width;
    image_create_info.extent.height = tex_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
    image_create_info.flags = 0;

    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_bad);
    ASSERT_VK_SUCCESS(err);

    image_create_info.format = tex_format_good;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
    err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_good);
    ASSERT_VK_SUCCESS(err);

    // ---Bind image memory---
    VkMemoryRequirements img_mem_reqs;
    vkGetImageMemoryRequirements(m_device->device(), image_bad, &img_mem_reqs);
    VkMemoryAllocateInfo image_alloc_info = {};
    image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    image_alloc_info.pNext = NULL;
    image_alloc_info.memoryTypeIndex = 0;
    image_alloc_info.allocationSize = img_mem_reqs.size;
    bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &image_alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
    ASSERT_TRUE(pass);
    VkDeviceMemory mem;
    err = vkAllocateMemory(m_device->device(), &image_alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);
    err = vkBindImageMemory(m_device->device(), image_bad, mem, 0);
    ASSERT_VK_SUCCESS(err);
    // -----------------------

    VkImageViewCreateInfo image_view_create_info = {};
    image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    image_view_create_info.image = image_bad;
    image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
    image_view_create_info.format = tex_format_bad;
    image_view_create_info.subresourceRange.baseArrayLayer = 0;
    image_view_create_info.subresourceRange.baseMipLevel = 0;
    image_view_create_info.subresourceRange.layerCount = 1;
    image_view_create_info.subresourceRange.levelCount = 1;
    image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT;

    VkImageView view;
    err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
    m_errorMonitor->VerifyFound();

    vkDestroyImage(m_device->device(), image_bad, NULL);
    vkDestroyImage(m_device->device(), image_good, NULL);
    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
    vkFreeMemory(m_device->device(), mem, NULL);
}

TEST_F(VkLayerTest, ClearImageErrors) {
    TEST_DESCRIPTION("Call ClearColorImage w/ a depth|stencil image and ClearDepthStencilImage with a color image.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_commandBuffer->begin();

    // Color image
    VkClearColorValue clear_color;
    memset(clear_color.uint32, 0, sizeof(uint32_t) * 4);
    VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
    const VkFormat color_format = VK_FORMAT_B8G8R8A8_UNORM;
    const int32_t img_width = 32;
    const int32_t img_height = 32;
    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = color_format;
    image_create_info.extent.width = img_width;
    image_create_info.extent.height = img_height;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;

    vk_testing::Image color_image_no_transfer;
    color_image_no_transfer.init(*m_device, image_create_info, reqs);

    image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    vk_testing::Image color_image;
    color_image.init(*m_device, image_create_info, reqs);

    const VkImageSubresourceRange color_range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT);

    // Depth/Stencil image
    VkClearDepthStencilValue clear_value = {0};
    reqs = 0;  // don't need HOST_VISIBLE DS image
    VkImageCreateInfo ds_image_create_info = vk_testing::Image::create_info();
    ds_image_create_info.imageType = VK_IMAGE_TYPE_2D;
    ds_image_create_info.format = VK_FORMAT_D16_UNORM;
    ds_image_create_info.extent.width = 64;
    ds_image_create_info.extent.height = 64;
    ds_image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    ds_image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;

    vk_testing::Image ds_image;
    ds_image.init(*m_device, ds_image_create_info, reqs);

    const VkImageSubresourceRange ds_range = vk_testing::Image::subresource_range(ds_image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with depth/stencil image.");
    vkCmdClearColorImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT");
    vkCmdClearColorImage(m_commandBuffer->handle(), color_image_no_transfer.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1,
                         &color_range);
    m_errorMonitor->VerifyFound();

    // Call CmdClearDepthStencilImage with color image
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "vkCmdClearDepthStencilImage called without a depth/stencil image.");
    vkCmdClearDepthStencilImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_value, 1,
                                &ds_range);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CommandQueueFlags) {
    TEST_DESCRIPTION("Allocate a command buffer on a queue that does not support graphics and try to issue a graphics-only command");

    ASSERT_NO_FATAL_FAILURE(Init());

    uint32_t queueFamilyIndex = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
    if (queueFamilyIndex == UINT32_MAX) {
        printf("%s Non-graphics queue family not found; skipped.\n", kSkipPrefix);
        return;
    } else {
        // Create command pool on a non-graphics queue
        VkCommandPoolObj command_pool(m_device, queueFamilyIndex);

        // Setup command buffer on pool
        VkCommandBufferObj command_buffer(m_device, &command_pool);
        command_buffer.begin();

        // Issue a graphics only command
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
        VkViewport viewport = {0, 0, 16, 16, 0, 1};
        command_buffer.SetViewport(0, 1, &viewport);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, ExecuteUnrecordedSecondaryCB) {
    TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB in the initial state");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    // never record secondary

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
    m_commandBuffer->begin();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) {
    TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state");
    ASSERT_NO_FATAL_FAILURE(Init());
    // never record m_commandBuffer

    VkSubmitInfo si = {};
    si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    si.commandBufferCount = 1;
    si.pCommandBuffers = &m_commandBuffer->handle();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00072");
    vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, ExecuteSecondaryCBWithLayoutMismatch) {
    TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB with incorrect initial layout.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));

    VkImageCreateInfo image_create_info = {};
    image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    image_create_info.pNext = NULL;
    image_create_info.imageType = VK_IMAGE_TYPE_2D;
    image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
    image_create_info.extent.width = 32;
    image_create_info.extent.height = 1;
    image_create_info.extent.depth = 1;
    image_create_info.mipLevels = 1;
    image_create_info.arrayLayers = 1;
    image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
    image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
    image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
    image_create_info.flags = 0;

    VkImageSubresource image_sub = VkImageObj::subresource(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0);
    VkImageSubresourceRange image_sub_range = VkImageObj::subresource_range(image_sub);

    VkImageObj image(m_device);
    image.init(&image_create_info);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier image_barrier =
        image.image_memory_barrier(0, 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, image_sub_range);

    auto pipeline = [&image_barrier](const VkCommandBufferObj &cb, VkImageLayout old_layout, VkImageLayout new_layout) {
        image_barrier.oldLayout = old_layout;
        image_barrier.newLayout = new_layout;
        vkCmdPipelineBarrier(cb.handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0,
                             nullptr, 1, &image_barrier);
    };

    // Validate that mismatched use of image layout in secondary command buffer is caught at record time
    VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
    secondary.begin();
    pipeline(secondary, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    secondary.end();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001");
    m_commandBuffer->begin();
    pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyFound();

    // Validate that we've tracked the changes from the secondary CB correctly
    m_errorMonitor->ExpectSuccess();
    pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL);
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();

    m_commandBuffer->reset();
    secondary.reset();

    // Validate that UNDEFINED doesn't false positive on us
    secondary.begin();
    pipeline(secondary, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    secondary.end();
    m_commandBuffer->begin();
    pipeline(*m_commandBuffer, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    m_errorMonitor->ExpectSuccess();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();
}
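
// How the layout-mismatch test above works: the validation layers track the
// layout each image is left in at the end of a command buffer, plus the first
// layout a secondary command buffer expects on entry, and compare the two at
// vkCmdExecuteCommands time. (This summary is an inference from the checks the
// test exercises, not text taken from the validation layer documentation.)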

TEST_F(VkLayerTest, ExtensionNotEnabled) {
    TEST_DESCRIPTION("Validate that using an API from an unenabled extension returns an error");

    // Do NOT enable VK_KHR_maintenance1
    ASSERT_NO_FATAL_FAILURE(Init());

    // TODO: Main1 is ALWAYS enabled in 1.1. Re-write test with an extension present in both 1.0 and 1.1
    if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
        printf("%s Device has apiVersion greater than 1.0 -- skipping extension enabled check.\n", kSkipPrefix);
        return;
    }

    // Find address of extension API
    PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR =
        (PFN_vkTrimCommandPoolKHR)vkGetDeviceProcAddr(m_device->handle(), "vkTrimCommandPoolKHR");
    if (vkTrimCommandPoolKHR == nullptr) {
        printf("%s Maintenance1 not supported by device; skipped.\n", kSkipPrefix);
        return;
    }
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "but its required extension VK_KHR_maintenance1 has not been enabled");
    vkTrimCommandPoolKHR(m_device->handle(), m_commandPool->handle(), (VkCommandPoolTrimFlags)0);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) {
    TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) &&
          (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) {
        printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
    const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"};
    VkDevice testDevice;
    VkDeviceCreateInfo device_create_info = {};
    auto features = m_device->phy().features();
    device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    device_create_info.pNext = NULL;
    device_create_info.queueCreateInfoCount = queue_info.size();
    device_create_info.pQueueCreateInfos = queue_info.data();
    device_create_info.enabledLayerCount = 0;
    device_create_info.ppEnabledLayerNames = NULL;
    device_create_info.enabledExtensionCount = 2;
    device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names;
    device_create_info.pEnabledFeatures = &features;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374");
    // The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
    // not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
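    // (Added note: SetUnexpectedError merely tolerates the message if it shows up;
    // unlike SetDesiredFailureMsg it does not fail the test when the message never
    // appears, which is what makes it safe on non-LunarG loaders.)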
    m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
    vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCreateDescriptorPool) {
    TEST_DESCRIPTION("Attempt to create descriptor pool with invalid parameters");

    ASSERT_NO_FATAL_FAILURE(Init());

    const uint32_t default_descriptor_count = 1;
    const VkDescriptorPoolSize dp_size_template{VK_DESCRIPTOR_TYPE_SAMPLER, default_descriptor_count};

    const VkDescriptorPoolCreateInfo dp_ci_template{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
                                                    nullptr,  // pNext
                                                    0,        // flags
                                                    1,        // maxSets
                                                    1,        // poolSizeCount
                                                    &dp_size_template};

    // try maxSets = 0
    {
        VkDescriptorPoolCreateInfo invalid_dp_ci = dp_ci_template;
        invalid_dp_ci.maxSets = 0;  // invalid maxSets value

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301");
        {
            VkDescriptorPool pool;
            vkCreateDescriptorPool(m_device->device(), &invalid_dp_ci, nullptr, &pool);
        }
        m_errorMonitor->VerifyFound();
    }

    // try descriptorCount = 0
    {
        VkDescriptorPoolSize invalid_dp_size = dp_size_template;
        invalid_dp_size.descriptorCount = 0;  // invalid descriptorCount value

        VkDescriptorPoolCreateInfo dp_ci = dp_ci_template;
        dp_ci.pPoolSizes = &invalid_dp_size;

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-descriptorCount-00302");
        {
            VkDescriptorPool pool;
            vkCreateDescriptorPool(m_device->device(), &dp_ci, nullptr, &pool);
        }
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, InvalidCreateBufferSize) {
    TEST_DESCRIPTION("Attempt to create VkBuffer with size of zero");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkBufferCreateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-size-00912");
    info.size = 0;
    VkBuffer buffer;
    vkCreateBuffer(m_device->device(), &info, nullptr, &buffer);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, SetDynViewportParamTests) {
    TEST_DESCRIPTION("Test parameters of vkCmdSetViewport without multiViewport feature");

    VkPhysicalDeviceFeatures features{};  // all features off, including multiViewport
    ASSERT_NO_FATAL_FAILURE(Init(&features));

    const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0};
    const VkViewport viewports[] = {vp, vp};

    m_commandBuffer->begin();

    // array tests
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
    vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, viewports);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 2, viewports);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
    vkCmdSetViewport(m_commandBuffer->handle(), 1, 0, viewports);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225");
    vkCmdSetViewport(m_commandBuffer->handle(), 1, 2, viewports);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, nullptr);
    m_errorMonitor->VerifyFound();

    // core viewport tests
    using std::vector;
    struct TestCase {
        VkViewport vp;
        std::string veid;
    };

    // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit
    const auto one_past_max_w = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[0]));
    const auto one_past_max_h = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[1]));
    const auto min_bound = m_device->props.limits.viewportBoundsRange[0];
    const auto max_bound = m_device->props.limits.viewportBoundsRange[1];
    const auto one_before_min_bounds = NearestSmaller(min_bound);
    const auto one_past_max_bounds = NearestGreater(max_bound);
    const auto below_zero = NearestSmaller(0.0f);
    const auto past_one = NearestGreater(1.0f);

    vector<TestCase> test_cases = {
        {{0.0, 0.0, 0.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"},
        {{0.0, 0.0, one_past_max_w, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01771"},
        {{0.0, 0.0, NAN, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"},
        {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, "VUID-VkViewport-height-01773"},
        {{one_before_min_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"},
        {{one_past_max_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"},
        {{NAN, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"},
        {{0.0, one_before_min_bounds, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"},
        {{0.0, NAN, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"},
        {{max_bound, 0.0, 1.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"},
        {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, "VUID-VkViewport-y-01233"},
        {{0.0, 0.0, 64.0, 64.0, below_zero, 1.0}, "VUID-VkViewport-minDepth-01234"},
        {{0.0, 0.0, 64.0, 64.0, past_one, 1.0}, "VUID-VkViewport-minDepth-01234"},
        {{0.0, 0.0, 64.0, 64.0, NAN, 1.0}, "VUID-VkViewport-minDepth-01234"},
        {{0.0, 0.0, 64.0, 64.0, 0.0, below_zero}, "VUID-VkViewport-maxDepth-01235"},
        {{0.0, 0.0, 64.0, 64.0, 0.0, past_one}, "VUID-VkViewport-maxDepth-01235"},
        {{0.0, 0.0, 64.0, 64.0, 0.0, NAN}, "VUID-VkViewport-maxDepth-01235"},
    };

    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        test_cases.push_back({{0.0, 0.0, 64.0, 0.0, 0.0, 1.0}, "VUID-VkViewport-height-01772"});
        test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01772"});
    } else {
        test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01773"});
    }

    for (const auto &test_case : test_cases) {
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.veid);
        vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp);
        m_errorMonitor->VerifyFound();
    }
}

void NegHeightViewportTests(VkDeviceObj *m_device, VkCommandBufferObj *m_commandBuffer, ErrorMonitor *m_errorMonitor) {
    const auto &limits = m_device->props.limits;

    m_commandBuffer->begin();

    using std::vector;
    struct TestCase {
        VkViewport vp;
        vector<std::string> vuids;
    };

    // not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit
    const auto one_before_min_h = NearestSmaller(-static_cast<float>(limits.maxViewportDimensions[1]));
    const auto one_past_max_h = NearestGreater(static_cast<float>(limits.maxViewportDimensions[1]));
    const auto min_bound = limits.viewportBoundsRange[0];
    const auto max_bound = limits.viewportBoundsRange[1];
    const auto one_before_min_bound = NearestSmaller(min_bound);
    const auto one_past_max_bound = NearestGreater(max_bound);

    const vector<TestCase> test_cases = {{{0.0, 0.0, 64.0, one_before_min_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
                                         {{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
                                         {{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
                                         {{0.0, one_before_min_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01775"}},
                                         {{0.0, one_past_max_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01776"}},
                                         {{0.0, min_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01777"}},
                                         {{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01233"}}};

    for (const auto &test_case : test_cases) {
        for (const auto &vuid : test_case.vuids) {
            if (vuid == "VUID-Undefined")
                m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                     "is less than VkPhysicalDeviceLimits::viewportBoundsRange[0]");
            else
                m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
        }
        vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp);
        m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, SetDynViewportParamMaintenance1Tests) {
    TEST_DESCRIPTION("Verify errors are detected on misuse of SetViewport with a negative viewport extension enabled.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
    } else {
        printf("%s VK_KHR_maintenance1 extension not supported -- skipping test\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    NegHeightViewportTests(m_device, m_commandBuffer, m_errorMonitor);
}

TEST_F(VkLayerTest, SetDynViewportParamMultiviewportTests) {
    TEST_DESCRIPTION("Test parameters of vkCmdSetViewport with multiViewport feature enabled");

    ASSERT_NO_FATAL_FAILURE(Init());

    if (!m_device->phy().features().multiViewport) {
        printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix);
        return;
    }

    const auto max_viewports = m_device->props.limits.maxViewports;
    const uint32_t too_many_viewports = 65536 + 1;  // let's say this is too much to allocate pViewports for

    m_commandBuffer->begin();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports, nullptr);
    m_errorMonitor->VerifyFound();

    if (max_viewports >= too_many_viewports) {
        printf("%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping part of test.\n",
               kSkipPrefix);
        return;
    }

    const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0};
    const std::vector<VkViewport> viewports(max_viewports + 1, vp);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
    vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports + 1, viewports.data());
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
    vkCmdSetViewport(m_commandBuffer->handle(), max_viewports, 1, viewports.data());
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
    vkCmdSetViewport(m_commandBuffer->handle(), 1, max_viewports, viewports.data());
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
    vkCmdSetViewport(m_commandBuffer->handle(), max_viewports + 1, 0, viewports.data());
    m_errorMonitor->VerifyFound();
}

//
// POSITIVE VALIDATION TESTS
//
// These tests do not expect to encounter ANY validation errors; they pass only if this is true

TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) {
    TEST_DESCRIPTION("Image copies between compressed and uncompressed images");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Verify format support
    // Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt).
    if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL,
                                         VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) ||
        !ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL,
                                         VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) {
        printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
        return;
    }

    VkImageObj uncomp_10x10t_image(m_device);       // Size = 10 * 10 * 64 = 6400
    VkImageObj comp_10x10b_40x40t_image(m_device);  // Size = 40 * 40 * 4 = 6400

    uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT,
                             VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
    comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
                                  VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);

    if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) {
        printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
        return;
    }

    // Both copies represent the same number of bytes.
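    // Worked size check: BC1 packs each 4x4 block into 64 bits, so the 40x40-texel
    // compressed image is 10x10 blocks * 8 bytes = 800 bytes, and the 10x10-texel
    // R16G16B16A16_UINT image is 10 * 10 * 8 bytes = 800 bytes -- identical totals.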
    // Bytes per texel = 0.5 for BC1 (64 bits per 4x4 block), 8 for the uncompressed format

    // Copy between the two images in both directions
    VkImageCopy copy_region = {};
    copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    copy_region.srcSubresource.mipLevel = 0;
    copy_region.dstSubresource.mipLevel = 0;
    copy_region.srcSubresource.baseArrayLayer = 0;
    copy_region.dstSubresource.baseArrayLayer = 0;
    copy_region.srcSubresource.layerCount = 1;
    copy_region.dstSubresource.layerCount = 1;
    copy_region.srcOffset = {0, 0, 0};
    copy_region.dstOffset = {0, 0, 0};

    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->begin();

    // Copy from uncompressed to compressed
    copy_region.extent = {10, 10, 1};  // Dimensions in (uncompressed) texels
    vkCmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);

    // And from compressed to uncompressed
    copy_region.extent = {40, 40, 1};  // Dimensions in (compressed) texels
    vkCmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
                   uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);

    m_errorMonitor->VerifyNotFound();
    m_commandBuffer->end();
}

TEST_F(VkPositiveLayerTest, DeleteDescriptorSetLayoutsBeforeDescriptorSets) {
    TEST_DESCRIPTION("Create DSLayouts and DescriptorSets and then delete the DSLayouts before the DescriptorSets.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    VkResult err;

    m_errorMonitor->ExpectSuccess();

    VkDescriptorPoolSize ds_type_count = {};
    ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
    ds_type_count.descriptorCount = 1;

    VkDescriptorPoolCreateInfo ds_pool_ci = {};
    ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    ds_pool_ci.pNext = NULL;
    ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
    ds_pool_ci.maxSets = 1;
    ds_pool_ci.poolSizeCount = 1;
    ds_pool_ci.pPoolSizes = &ds_type_count;

    VkDescriptorPool ds_pool_one;
    err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool_one);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorSetLayoutBinding dsl_binding = {};
    dsl_binding.binding = 0;
    dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dsl_binding.descriptorCount = 1;
    dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
    dsl_binding.pImmutableSamplers = NULL;

    VkDescriptorSet descriptorSet;
    {
        const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});

        VkDescriptorSetAllocateInfo alloc_info = {};
        alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
        alloc_info.descriptorSetCount = 1;
        alloc_info.descriptorPool = ds_pool_one;
        alloc_info.pSetLayouts = &ds_layout.handle();
        err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
        ASSERT_VK_SUCCESS(err);
    }  // ds_layout destroyed

    err = vkFreeDescriptorSets(m_device->device(), ds_pool_one, 1, &descriptorSet);

    vkDestroyDescriptorPool(m_device->device(), ds_pool_one, NULL);
    m_errorMonitor->VerifyNotFound();
}
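
// Rule exercised above: the spec allows a VkDescriptorSetLayout to be destroyed
// once the descriptor sets using it have been allocated; the sets stay valid, so
// the layers must not flag the dangling layout handle as an error.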

TEST_F(VkPositiveLayerTest, CommandPoolDeleteWithReferences) {
    TEST_DESCRIPTION("Ensure the validation layers bookkeeping tracks the implicit command buffer frees.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkCommandPoolCreateInfo cmd_pool_info = {};
    cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    cmd_pool_info.pNext = NULL;
    cmd_pool_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
    cmd_pool_info.flags = 0;  // no VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; buffers are freed only with the pool

    VkCommandPool secondary_cmd_pool;
    VkResult res = vkCreateCommandPool(m_device->handle(), &cmd_pool_info, NULL, &secondary_cmd_pool);
    ASSERT_VK_SUCCESS(res);

    VkCommandBufferAllocateInfo cmdalloc = vk_testing::CommandBuffer::create_info(secondary_cmd_pool);
    cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    VkCommandBuffer secondary_cmds;
    res = vkAllocateCommandBuffers(m_device->handle(), &cmdalloc, &secondary_cmds);

    VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {};
    cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    cmd_buf_inheritance_info.pNext = NULL;
    cmd_buf_inheritance_info.renderPass = VK_NULL_HANDLE;
    cmd_buf_inheritance_info.subpass = 0;
    cmd_buf_inheritance_info.framebuffer = VK_NULL_HANDLE;
    cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE;
    cmd_buf_inheritance_info.queryFlags = 0;
    cmd_buf_inheritance_info.pipelineStatistics = 0;

    VkCommandBufferBeginInfo secondary_begin = {};
    secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    secondary_begin.pNext = NULL;
    secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info;

    res = vkBeginCommandBuffer(secondary_cmds, &secondary_begin);
    ASSERT_VK_SUCCESS(res);
    vkEndCommandBuffer(secondary_cmds);

    m_commandBuffer->begin();
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cmds);
    m_commandBuffer->end();

    // DestroyCommandPool *implicitly* frees the command buffers allocated from it
    vkDestroyCommandPool(m_device->handle(), secondary_cmd_pool, NULL);

    // If bookkeeping has been lax, validating the reset will attempt to touch deleted data
    res = vkResetCommandPool(m_device->handle(), m_commandPool->handle(), 0);
    ASSERT_VK_SUCCESS(res);
}

TEST_F(VkLayerTest, SecondaryCommandBufferClearColorAttachmentsRenderArea) {
    TEST_DESCRIPTION("Create a secondary command buffer with CmdClearAttachments call that has a rect outside of renderPass renderArea");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = m_commandPool->handle();
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    command_buffer_allocate_info.commandBufferCount = 1;

    VkCommandBuffer secondary_command_buffer;
    ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));

    VkCommandBufferBeginInfo command_buffer_begin_info = {};
    VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
    command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    command_buffer_inheritance_info.renderPass = m_renderPass;
    command_buffer_inheritance_info.framebuffer = m_framebuffer;

    command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;

    vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
    VkClearAttachment color_attachment;
    color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    color_attachment.clearValue.color.float32[0] = 0;
    color_attachment.clearValue.color.float32[1] = 0;
    color_attachment.clearValue.color.float32[2] = 0;
    color_attachment.clearValue.color.float32[3] = 0;
    color_attachment.colorAttachment = 0;
    // x extent of 257 exceeds render area of 256
    VkClearRect clear_rect = {{{0, 0}, {257, 32}}};
    vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect);
    vkEndCommandBuffer(secondary_command_buffer);

    m_commandBuffer->begin();
    vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016");
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer);
    m_errorMonitor->VerifyFound();

    vkCmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
}

TEST_F(VkPositiveLayerTest, SecondaryCommandBufferClearColorAttachments) {
    TEST_DESCRIPTION("Create a secondary command buffer and record a CmdClearAttachments call into it");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = m_commandPool->handle();
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    command_buffer_allocate_info.commandBufferCount = 1;

    VkCommandBuffer secondary_command_buffer;
    ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));

    VkCommandBufferBeginInfo command_buffer_begin_info = {};
    VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
    command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    command_buffer_inheritance_info.renderPass = m_renderPass;
    command_buffer_inheritance_info.framebuffer = m_framebuffer;

    command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
    command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;

    vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
    VkClearAttachment color_attachment;
    color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    color_attachment.clearValue.color.float32[0] = 0;
    color_attachment.clearValue.color.float32[1] = 0;
    color_attachment.clearValue.color.float32[2] = 0;
    color_attachment.clearValue.color.float32[3] = 0;
    color_attachment.colorAttachment = 0;
    VkClearRect clear_rect = {{{0, 0}, {32, 32}}};
    vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect);
    vkEndCommandBuffer(secondary_command_buffer);

    m_commandBuffer->begin();
    vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
    vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer);
    vkCmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, SecondaryCommandBufferImageLayoutTransitions) {
    TEST_DESCRIPTION("Perform an image layout transition in a secondary command buffer followed by a transition in the primary.");
    VkResult err;
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    auto depth_format = FindSupportedDepthStencilFormat(gpu());
    if (!depth_format) {
        printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Allocate a secondary and primary cmd buffer
    VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = m_commandPool->handle();
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
    command_buffer_allocate_info.commandBufferCount = 1;

    VkCommandBuffer secondary_command_buffer;
    ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    VkCommandBuffer primary_command_buffer;
    ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &primary_command_buffer));

    VkCommandBufferBeginInfo command_buffer_begin_info = {};
    VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
    command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;

    err = vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
    ASSERT_VK_SUCCESS(err);

    VkImageObj image(m_device);
    image.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageMemoryBarrier img_barrier = {};
    img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    img_barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier.image = image.handle();
    img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    img_barrier.subresourceRange.baseArrayLayer = 0;
    img_barrier.subresourceRange.baseMipLevel = 0;
    img_barrier.subresourceRange.layerCount = 1;
    img_barrier.subresourceRange.levelCount = 1;
    vkCmdPipelineBarrier(secondary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0,
                         nullptr, 1, &img_barrier);
    err = vkEndCommandBuffer(secondary_command_buffer);
    ASSERT_VK_SUCCESS(err);

    // Now update primary cmd buffer to execute secondary and transitions image
    command_buffer_begin_info.pInheritanceInfo = nullptr;
    err = vkBeginCommandBuffer(primary_command_buffer, &command_buffer_begin_info);
    ASSERT_VK_SUCCESS(err);
    vkCmdExecuteCommands(primary_command_buffer, 1, &secondary_command_buffer);

    VkImageMemoryBarrier img_barrier2 = {};
    img_barrier2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    img_barrier2.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
    img_barrier2.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    img_barrier2.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier2.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
    img_barrier2.image = image.handle();
    img_barrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    img_barrier2.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
    img_barrier2.subresourceRange.baseArrayLayer = 0;
    img_barrier2.subresourceRange.baseMipLevel = 0;
    img_barrier2.subresourceRange.layerCount = 1;
    img_barrier2.subresourceRange.levelCount = 1;
    vkCmdPipelineBarrier(primary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0,
                         nullptr, 1, &img_barrier2);
    err = vkEndCommandBuffer(primary_command_buffer);
    ASSERT_VK_SUCCESS(err);

    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &primary_command_buffer;
    err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();

    err = vkDeviceWaitIdle(m_device->device());
    ASSERT_VK_SUCCESS(err);
    vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &secondary_command_buffer);
    vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &primary_command_buffer);
}

// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, IgnoreUnrelatedDescriptor) {
    TEST_DESCRIPTION(
        "Ensure that the vkUpdateDescriptorSets validation code is ignoring VkWriteDescriptorSet members that are not related to "
        "the descriptor type specified by VkWriteDescriptorSet::descriptorType. Correct validation behavior will result in the "
        "test running to completion without validation errors.");

    const uintptr_t invalid_ptr = 0xcdcdcdcd;

    ASSERT_NO_FATAL_FAILURE(Init());

    // Image Case
    {
        m_errorMonitor->ExpectSuccess();

        VkImageObj image(m_device);
        image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
        VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);

        OneOffDescriptorSet ds(m_device, {
                                             {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
                                         });

        VkDescriptorImageInfo image_info = {};
        image_info.imageView = view;
        image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = ds.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
        descriptor_write.pImageInfo = &image_info;

        // Set pBufferInfo and pTexelBufferView to invalid values, which should
        // be ignored for descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pBufferInfo.
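        // (0xcdcdcdcd matches the MSVC debug-heap fill pattern for freshly
        // allocated memory, so a stray dereference faults or reads obviously
        // bogus data instead of something plausible.)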
        descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);
        descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);

        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();
    }

    // Buffer Case
    {
        m_errorMonitor->ExpectSuccess();

        VkBuffer buffer;
        uint32_t queue_family_index = 0;
        VkBufferCreateInfo buffer_create_info = {};
        buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_create_info.size = 1024;
        buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
        buffer_create_info.queueFamilyIndexCount = 1;
        buffer_create_info.pQueueFamilyIndices = &queue_family_index;

        VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
        ASSERT_VK_SUCCESS(err);

        VkMemoryRequirements memory_reqs;
        VkDeviceMemory buffer_memory;
        bool pass;
        VkMemoryAllocateInfo memory_info = {};
        memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        memory_info.pNext = NULL;
        memory_info.allocationSize = 0;
        memory_info.memoryTypeIndex = 0;

        vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
        memory_info.allocationSize = memory_reqs.size;
        pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
        ASSERT_TRUE(pass);

        err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
        ASSERT_VK_SUCCESS(err);
        err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
        ASSERT_VK_SUCCESS(err);

        OneOffDescriptorSet ds(m_device, {
                                             {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                         });

        VkDescriptorBufferInfo buffer_info = {};
        buffer_info.buffer = buffer;
        buffer_info.offset = 0;
        buffer_info.range = 1024;

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = ds.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptor_write.pBufferInfo = &buffer_info;

        // Set pImageInfo and pTexelBufferView to invalid values, which should
        // be ignored for descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pImageInfo.
        descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
        descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);

        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();

        vkDestroyBuffer(m_device->device(), buffer, NULL);
        vkFreeMemory(m_device->device(), buffer_memory, NULL);
    }

    // Texel Buffer Case
    {
        m_errorMonitor->ExpectSuccess();

        VkBuffer buffer;
        uint32_t queue_family_index = 0;
        VkBufferCreateInfo buffer_create_info = {};
        buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        buffer_create_info.size = 1024;
        buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
        buffer_create_info.queueFamilyIndexCount = 1;
        buffer_create_info.pQueueFamilyIndices = &queue_family_index;

        VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
        ASSERT_VK_SUCCESS(err);

        VkMemoryRequirements memory_reqs;
        VkDeviceMemory buffer_memory;
        bool pass;
        VkMemoryAllocateInfo memory_info = {};
        memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        memory_info.pNext = NULL;
        memory_info.allocationSize = 0;
        memory_info.memoryTypeIndex = 0;

        vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
        memory_info.allocationSize = memory_reqs.size;
        pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
        ASSERT_TRUE(pass);

        err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
        ASSERT_VK_SUCCESS(err);
        err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
        ASSERT_VK_SUCCESS(err);

        VkBufferViewCreateInfo buff_view_ci = {};
        buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
        buff_view_ci.buffer = buffer;
        buff_view_ci.format = VK_FORMAT_R8_UNORM;
        buff_view_ci.range = VK_WHOLE_SIZE;
        VkBufferView buffer_view;
        err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view);

        OneOffDescriptorSet ds(m_device, {
                                             {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                         });

        VkWriteDescriptorSet descriptor_write;
        memset(&descriptor_write, 0, sizeof(descriptor_write));
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = ds.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
        descriptor_write.pTexelBufferView = &buffer_view;

        // Set pImageInfo and pBufferInfo to invalid values, which should be
        // ignored for descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER.
        // This will most likely produce a crash if the parameter_validation
        // layer does not correctly ignore pImageInfo and pBufferInfo.
        descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
        descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);
        vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
        m_errorMonitor->VerifyNotFound();

        vkDestroyBufferView(m_device->device(), buffer_view, NULL);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        vkFreeMemory(m_device->device(), buffer_memory, NULL);
    }
}

TEST_F(VkPositiveLayerTest, ImmutableSamplerOnlyDescriptor) {
    TEST_DESCRIPTION("Bind a DescriptorSet with only an immutable sampler and make sure that we don't warn for no update.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    VkSampler sampler;
    VkResult err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
    ASSERT_VK_SUCCESS(err);

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    m_errorMonitor->ExpectSuccess();
    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            0, nullptr);
    m_errorMonitor->VerifyNotFound();

    vkDestroySampler(m_device->device(), sampler, NULL);
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, DuplicateDescriptorBinding) {
    TEST_DESCRIPTION("Create a descriptor set layout with a duplicate binding number.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Create layout where two binding #s are "1"
    static const uint32_t NUM_BINDINGS = 3;
    VkDescriptorSetLayoutBinding dsl_binding[NUM_BINDINGS] = {};
    dsl_binding[0].binding = 1;
    dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding[0].descriptorCount = 1;
    dsl_binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dsl_binding[0].pImmutableSamplers = NULL;
    dsl_binding[1].binding = 0;
    dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding[1].descriptorCount = 1;
    dsl_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dsl_binding[1].pImmutableSamplers = NULL;
    dsl_binding[2].binding = 1;  // Duplicate binding should cause error
    dsl_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dsl_binding[2].descriptorCount = 1;
    dsl_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dsl_binding[2].pImmutableSamplers = NULL;

    VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
    ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ds_layout_ci.pNext = NULL;
    ds_layout_ci.bindingCount = NUM_BINDINGS;
    ds_layout_ci.pBindings = dsl_binding;
    VkDescriptorSetLayout ds_layout;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279");
    vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidPushDescriptorSetLayout) {
    TEST_DESCRIPTION("Create a push descriptor set layout with invalid bindings.");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
        return;
    }
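    // The two-phase setup used throughout these tests: instance extensions must be
    // requested before InitFramework() creates the VkInstance, and device extensions
    // before InitState() creates the VkDevice. A minimal sketch of the raw Vulkan
    // calls this framework pattern stands in for (abbreviated, error handling omitted):
    //
    //     const char *inst_exts[] = {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME};
    //     VkInstanceCreateInfo ici = {VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO};
    //     ici.enabledExtensionCount = 1;
    //     ici.ppEnabledExtensionNames = inst_exts;
    //     vkCreateInstance(&ici, nullptr, &instance);
    //
    //     const char *dev_exts[] = {VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME};
    //     VkDeviceCreateInfo dci = {VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO};
    //     dci.enabledExtensionCount = 1;
    //     dci.ppEnabledExtensionNames = dev_exts;
    //     vkCreateDevice(gpu, &dci, nullptr, &device);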
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Find address of extension call and make the call PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); assert(vkGetPhysicalDeviceProperties2KHR != nullptr); // Get the push descriptor limits auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>(); auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop); vkGetPhysicalDeviceProperties2KHR(m_device->phy().handle(), &prop2); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; // Note that as binding is referenced in ds_layout_ci, it is effectively in the closure by reference as well. auto test_create_ds_layout = [&ds_layout_ci, this](std::string error) { VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error); vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); }; // Starting with the initial VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC type set above.. test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; test_create_ds_layout( "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); // This is the same VUID as above, just a second error condition. 
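    // Both dynamic buffer types funnel into the same VUID: 00280 is written as a single
    // rule stating that, with VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR set,
    // no binding may use VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC or
    // VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC. Dynamic offsets are supplied through
    // vkCmdBindDescriptorSets, which push descriptors never go through. A non-dynamic
    // binding such as the following would be accepted:
    //
    //     VkDescriptorSetLayoutBinding ok = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1,
    //                                        VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};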
    if (!(push_descriptor_prop.maxPushDescriptors == std::numeric_limits<uint32_t>::max())) {
        binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
        binding.descriptorCount = push_descriptor_prop.maxPushDescriptors + 1;
        test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
    } else {
        printf("%s maxPushDescriptors is set to maximum uint32_t value, skipping 'out of range' test.\n", kSkipPrefix);
    }
}

TEST_F(VkLayerTest, PushDescriptorSetLayoutWithoutExtension) {
    TEST_DESCRIPTION("Create a push descriptor set layout without loading the needed extension.");
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &binding;

    std::string error = "Attempted to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR in ";
    error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension ";
    error = error + VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME;
    error = error + " has not been enabled.";

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str());
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}

TEST_F(VkLayerTest, DescriptorIndexingSetLayoutWithoutExtension) {
    TEST_DESCRIPTION("Create an update_after_bind set layout without loading the needed extension.");
    ASSERT_NO_FATAL_FAILURE(Init());

    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;

    // "Attemped" (sic) matches the message text expected from the layer, so it is left as-is.
    std::string error = "Attemped to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT in ";
    error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension ";
    error = error + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME;
    error = error + " has not been enabled.";

    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str());
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}

TEST_F(VkLayerTest, DescriptorIndexingSetLayout) {
    TEST_DESCRIPTION("Exercise various create/allocate-time errors related to VK_EXT_descriptor_indexing.");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));

    std::array<const char *, 2> required_device_extensions = {
        {VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind
    auto indexingFeatures = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
    auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexingFeatures);
    vkGetPhysicalDeviceFeatures2(gpu(), &features2);
    indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    VkDescriptorBindingFlagsEXT flags = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT;
    auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>();
    flags_create_info.bindingCount = 1;
    flags_create_info.pBindingFlags = &flags;

    VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info);
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &binding;
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;

    // VU for VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount
    flags_create_info.bindingCount = 2;
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002");
    VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);

    flags_create_info.bindingCount = 1;

    // set is missing UPDATE_AFTER_BIND_POOL flag.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000");
    // binding uses a feature we disabled
    m_errorMonitor->SetDesiredFailureMsg(
        VK_DEBUG_REPORT_ERROR_BIT_EXT,
        "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUniformBufferUpdateAfterBind-03005");
    err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    m_errorMonitor->VerifyFound();
    vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);

    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
    ds_layout_ci.bindingCount = 0;
    flags_create_info.bindingCount = 0;
    err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount};
    auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
    dspci.poolSizeCount = 1;
    dspci.pPoolSizes = &pool_size;
    dspci.maxSets = 1;
    VkDescriptorPool pool;
    err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
    ASSERT_VK_SUCCESS(err);

    auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>();
    ds_alloc_info.descriptorPool = pool;
    ds_alloc_info.descriptorSetCount = 1;
    ds_alloc_info.pSetLayouts = &ds_layout;

    VkDescriptorSet ds = VK_NULL_HANDLE;
    // mismatch between descriptor set and pool
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044");
    vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
    m_errorMonitor->VerifyFound();

    vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
    vkDestroyDescriptorPool(m_device->handle(), pool, nullptr);

    if (indexingFeatures.descriptorBindingVariableDescriptorCount) {
        ds_layout_ci.flags = 0;
        ds_layout_ci.bindingCount = 1;
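        // VARIABLE_DESCRIPTOR_COUNT_BIT makes the last binding's descriptorCount an
        // upper bound; the actual count is chosen per allocation by chaining
        // VkDescriptorSetVariableDescriptorCountAllocateInfoEXT into the allocate
        // info, roughly like this (sketch; counts[i] must not exceed the declared count):
        //
        //     uint32_t counts[] = {actual_count};
        //     auto vdc = lvl_init_struct<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>();
        //     vdc.descriptorSetCount = 1;
        //     vdc.pDescriptorCounts = counts;
        //     auto alloc = lvl_init_struct<VkDescriptorSetAllocateInfo>(&vdc);
        //
        // The subtest below deliberately asks for more descriptors (2) than the
        // binding declares (1), which must trip ...-pSetLayouts-03046.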
flags_create_info.bindingCount = 1; flags = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT; err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); pool_size = {binding.descriptorType, binding.descriptorCount}; dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto count_alloc_info = lvl_init_struct<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(); count_alloc_info.descriptorSetCount = 1; // Set variable count larger than what was in the descriptor binding uint32_t variable_count = 2; count_alloc_info.pDescriptorCounts = &variable_count; ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(&count_alloc_info); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); } } TEST_F(VkLayerTest, DescriptorIndexingUpdateAfterBind) { TEST_DESCRIPTION("Exercise errors for updating a descriptor set after it is bound."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME); } else { printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix); return; } // Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind auto indexingFeatures = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(); auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexingFeatures); vkGetPhysicalDeviceFeatures2(gpu(), &features2); indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE; if (VK_FALSE == indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind) { printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix); return; } if (VK_FALSE == features2.features.fragmentStoresAndAtomics) { printf("%s Test requires (unsupported) fragmentStoresAndAtomics, skipping\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorBindingFlagsEXT flags[2] = {0, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT}; auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(); 
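    // pBindingFlags is a parallel array: entry N applies to pBindings[N] of the
    // layout that chains this struct, so bindingCount here must be zero or match
    // VkDescriptorSetLayoutCreateInfo::bindingCount. Below, binding 0 gets no flags
    // and binding 1 gets UPDATE_AFTER_BIND, so only binding 1 may legally be
    // rewritten once the set has been bound into a command buffer.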
    flags_create_info.bindingCount = 2;
    flags_create_info.pBindingFlags = &flags[0];

    // Descriptor set has two bindings - only the second is update_after_bind
    VkDescriptorSetLayoutBinding binding[2] = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
        {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
    };
    auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info);
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
    ds_layout_ci.bindingCount = 2;
    ds_layout_ci.pBindings = &binding[0];
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorPoolSize pool_sizes[2] = {
        {binding[0].descriptorType, binding[0].descriptorCount},
        {binding[1].descriptorType, binding[1].descriptorCount},
    };
    auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
    dspci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT;
    dspci.poolSizeCount = 2;
    dspci.pPoolSizes = &pool_sizes[0];
    dspci.maxSets = 1;
    VkDescriptorPool pool;
    err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
    ASSERT_VK_SUCCESS(err);

    auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>();
    ds_alloc_info.descriptorPool = pool;
    ds_alloc_info.descriptorSetCount = 1;
    ds_alloc_info.pSetLayouts = &ds_layout;

    VkDescriptorSet ds = VK_NULL_HANDLE;
    // The allocation result was previously not captured, so the ASSERT checked a stale err.
    err = vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
    ASSERT_VK_SUCCESS(err);

    VkBufferCreateInfo buffCI = {};
    buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffCI.size = 1024;
    buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

    VkBuffer dyub;
    err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
    ASSERT_VK_SUCCESS(err);

    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs);

    VkMemoryAllocateInfo mem_alloc_info = {};
    mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc_info.allocationSize = mem_reqs.size;
    m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    err = vkBindBufferMemory(m_device->device(), dyub, mem, 0);
    ASSERT_VK_SUCCESS(err);

    VkDescriptorBufferInfo buffInfo[2] = {};
    buffInfo[0].buffer = dyub;
    buffInfo[0].offset = 0;
    buffInfo[0].range = 1024;

    VkWriteDescriptorSet descriptor_write[2] = {};
    descriptor_write[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write[0].dstSet = ds;
    descriptor_write[0].dstBinding = 0;
    descriptor_write[0].descriptorCount = 1;
    descriptor_write[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write[0].pBufferInfo = buffInfo;
    descriptor_write[1] = descriptor_write[0];
    descriptor_write[1].dstBinding = 1;
    descriptor_write[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;

    VkPipelineLayout pipeline_layout;
    VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
    pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
    pipeline_layout_ci.setLayoutCount = 1;
    pipeline_layout_ci.pSetLayouts = &ds_layout;
    vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);

    // Create a dummy pipeline, since VL inspects which bindings are actually used at draw time
    char const *vsSource =
        "#version 450\n"
        "void main(){\n"
        "   gl_Position = vec4(0);\n"
        "}\n";
    char const
*fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(set=0, binding=0) uniform foo0 { float x0; } bar0;\n" "layout(set=0, binding=1) buffer foo1 { float x1; } bar1;\n" "void main(){\n" " color = vec4(bar0.x0 + bar1.x1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.CreateVKPipeline(pipeline_layout, m_renderPass); // Make both bindings valid before binding to the command buffer vkUpdateDescriptorSets(m_device->device(), 2, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); // Two subtests. First only updates the update_after_bind binding and expects // no error. Second updates the other binding and expects an error when the // command buffer is ended. for (uint32_t i = 0; i < 2; ++i) { m_commandBuffer->begin(); vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &ds, 0, NULL); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); vkCmdDraw(m_commandBuffer->handle(), 0, 0, 0, 0); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); // Valid to update binding 1 after being bound vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL); m_errorMonitor->VerifyNotFound(); if (i == 0) { // expect no errors m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } else { // Invalid to update binding 0 after being bound. But the error is actually // generated during vkEndCommandBuffer vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[0], 0, NULL); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is invalid because bound DescriptorSet"); vkEndCommandBuffer(m_commandBuffer->handle()); m_errorMonitor->VerifyFound(); } } vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyBuffer(m_device->handle(), dyub, NULL); vkFreeMemory(m_device->handle(), mem, NULL); vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, NULL); } TEST_F(VkLayerTest, AllocatePushDescriptorSet) { TEST_DESCRIPTION("Attempt to allocate a push descriptor set."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR; 
ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &binding; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount}; auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>(); dspci.poolSizeCount = 1; dspci.pPoolSizes = &pool_size; dspci.maxSets = 1; VkDescriptorPool pool; err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool); ASSERT_VK_SUCCESS(err); auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(); ds_alloc_info.descriptorPool = pool; ds_alloc_info.descriptorSetCount = 1; ds_alloc_info.pSetLayouts = &ds_layout; VkDescriptorSet ds = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308"); vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds); m_errorMonitor->VerifyFound(); vkDestroyDescriptorPool(m_device->handle(), pool, nullptr); vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr); } TEST_F(VkLayerTest, PushDescriptorSetCmdPushBadArgs) { TEST_DESCRIPTION("Attempt to push a push descriptor set with incorrect arguments."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Create ordinary and push descriptor set layout VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj ds_layout(m_device, {binding}); ASSERT_TRUE(ds_layout.initialized()); const VkDescriptorSetLayoutObj push_ds_layout(m_device, {binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); ASSERT_TRUE(push_ds_layout.initialized()); // Now use the descriptor set layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); ASSERT_TRUE(pipeline_layout.initialized()); // Create a descriptor to push const uint32_t buffer_data[4] = {4, 5, 6, 7}; VkConstantBufferObj buffer_obj(m_device, sizeof(buffer_data), &buffer_data); ASSERT_TRUE(buffer_obj.initialized()); // Create a "write" struct, noting that the buffer_info cannot be a temporary arg (the return from write_descriptor_set // references its data), and the DescriptorSet() can be temporary, because the value is ignored VkDescriptorBufferInfo buffer_info = {buffer_obj.handle(), 0, VK_WHOLE_SIZE}; VkWriteDescriptorSet descriptor_write = vk_testing::Device::write_descriptor_set( vk_testing::DescriptorSet(), 0, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, &buffer_info); // Find address of extension call and make the call PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); 
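    // Extension entry points are not exported by the loader, so they are resolved at
    // runtime. vkGetDeviceProcAddr returns a device-specific pointer that skips the
    // loader trampoline; vkGetInstanceProcAddr would also work for device-level
    // commands but dispatches through the loader on every call. Generic pattern:
    //
    //     auto fp = reinterpret_cast<PFN_vkCmdPushDescriptorSetKHR>(
    //         vkGetDeviceProcAddr(device, "vkCmdPushDescriptorSetKHR"));
    //     if (fp == nullptr) { /* extension not enabled on this device */ }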
    ASSERT_TRUE(vkCmdPushDescriptorSetKHR != nullptr);

    // Section 1: Queue family matching/capabilities.
    // Create command pool on a non-graphics queue
    const uint32_t no_gfx_qfi = m_device->QueueFamilyMatching(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT);
    const uint32_t transfer_only_qfi =
        m_device->QueueFamilyMatching(VK_QUEUE_TRANSFER_BIT, (VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT));
    if ((UINT32_MAX == transfer_only_qfi) && (UINT32_MAX == no_gfx_qfi)) {
        printf("%s No compute or transfer only queue family, skipping bindpoint and queue tests.\n", kSkipPrefix);
    } else {
        const uint32_t err_qfi = (UINT32_MAX == no_gfx_qfi) ? transfer_only_qfi : no_gfx_qfi;

        VkCommandPoolObj command_pool(m_device, err_qfi);
        ASSERT_TRUE(command_pool.initialized());
        VkCommandBufferObj command_buffer(m_device, &command_pool);
        ASSERT_TRUE(command_buffer.initialized());
        command_buffer.begin();
        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
        if (err_qfi == transfer_only_qfi) {
            // As this queue supports neither the gfx nor the compute bindpoint, we'll get two errors
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
        }
        vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                                  &descriptor_write);
        m_errorMonitor->VerifyFound();
        command_buffer.end();

        // If we succeed in testing only one condition above, we need to test the other below.
        if ((UINT32_MAX != transfer_only_qfi) && (err_qfi != transfer_only_qfi)) {
            // Need to test the neither compute/gfx supported case separately.
            VkCommandPoolObj tran_command_pool(m_device, transfer_only_qfi);
            ASSERT_TRUE(tran_command_pool.initialized());
            VkCommandBufferObj tran_command_buffer(m_device, &tran_command_pool);
            ASSERT_TRUE(tran_command_buffer.initialized());
            tran_command_buffer.begin();

            // We can't avoid getting *both* errors in this case
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
            m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
            vkCmdPushDescriptorSetKHR(tran_command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0,
                                      1, &descriptor_write);
            m_errorMonitor->VerifyFound();
            tran_command_buffer.end();
        }
    }

    // Push to the non-push binding
    m_commandBuffer->begin();
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00365");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1,
                              &descriptor_write);
    m_errorMonitor->VerifyFound();

    // Specify set out of bounds
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00364");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 2, 1,
                              &descriptor_write);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();

    // This is a test for VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording
    // TODO: Add VALIDATION_ERROR_ code support to core_validation::ValidateCmd
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         "You must call vkBeginCommandBuffer() before this call to vkCmdPushDescriptorSetKHR()");
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, SetDynScissorParamTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetScissor without multiViewport feature"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); const VkRect2D scissor = {{0, 0}, {16, 16}}; const VkRect2D scissors[] = {scissor, scissor}; m_commandBuffer->begin(); // array tests m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 0, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594"); vkCmdSetScissor(m_commandBuffer->handle(), 1, 2, scissors); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, nullptr); m_errorMonitor->VerifyFound(); struct TestCase { VkRect2D scissor; std::string vuid; }; std::vector<TestCase> test_cases = {{{{-1, 0}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{0, -1}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"}, {{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{INT32_MAX, 0}, {1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetScissor-offset-00596"}, {{{0, 1}, {16, INT32_MAX}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetScissor-offset-00597"}, {{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetScissor-offset-00597"}}; for (const auto &test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid); vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &test_case.scissor); m_errorMonitor->VerifyFound(); } m_commandBuffer->end(); } TEST_F(VkLayerTest, SetDynScissorParamMultiviewportTests) { TEST_DESCRIPTION("Test parameters of vkCmdSetScissor with multiViewport feature enabled"); ASSERT_NO_FATAL_FAILURE(Init()); if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } const auto max_scissors = m_device->props.limits.maxViewports; const uint32_t too_many_scissors = 65536 + 1; // let's say this is too much to allocate pScissors for m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-vkCmdSetScissor-pScissors-parameter"); vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors, nullptr); m_errorMonitor->VerifyFound(); if (max_scissors >= too_many_scissors) { printf( "%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping " "part of " "test.\n", kSkipPrefix); return; } const VkRect2D scissor = {{0, 0}, {16, 16}}; const std::vector<VkRect2D> scissors(max_scissors + 1, scissor); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors + 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors, 1, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), 1, max_scissors, scissors.data()); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592"); vkCmdSetScissor(m_commandBuffer->handle(), max_scissors + 1, 0, scissors.data()); m_errorMonitor->VerifyFound(); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, EmptyDescriptorUpdateTest) { TEST_DESCRIPTION("Update last descriptor in a set that includes an empty binding"); VkResult err; ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); // Create layout with two uniform buffer descriptors w/ empty binding between them OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0 /*!*/, 0, nullptr}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}, }); // Create a buffer to be used for update VkBufferCreateInfo buff_ci = {}; buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buff_ci.size = 256; buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer); ASSERT_VK_SUCCESS(err); // Have to bind memory to buffer before descriptor update VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 512; // one allocation for both buffers mem_alloc.memoryTypeIndex = 0; VkMemoryRequirements mem_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs); bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } // Make sure allocation is sufficiently large to accommodate buffer requirements if (mem_reqs.size > mem_alloc.allocationSize) { mem_alloc.allocationSize = mem_reqs.size; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); // Only update the descriptor at binding 2 VkDescriptorBufferInfo buff_info = {}; buff_info.buffer = buffer; buff_info.offset = 0; buff_info.range = VK_WHOLE_SIZE; 
VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = ds.set_; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_errorMonitor->VerifyNotFound(); // Cleanup vkFreeMemory(m_device->device(), mem, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); } TEST_F(VkLayerTest, MultiplePushDescriptorSets) { TEST_DESCRIPTION("Verify an error message for multiple push descriptor sets."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const unsigned int descriptor_set_layout_count = 2; std::vector<VkDescriptorSetLayoutObj> ds_layouts; for (uint32_t i = 0; i < descriptor_set_layout_count; ++i) { dsl_binding.binding = i; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding), VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); } const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = {}; pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO; pipeline_layout_ci.pNext = NULL; pipeline_layout_ci.pushConstantRangeCount = 0; pipeline_layout_ci.pPushConstantRanges = NULL; pipeline_layout_ci.setLayoutCount = ds_vk_layouts.size(); pipeline_layout_ci.pSetLayouts = ds_vk_layouts.data(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293"); vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateDescriptorUpdateTemplate) { TEST_DESCRIPTION("Verify error messages for invalid vkCreateDescriptorUpdateTemplate calls."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); 
m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME); } else { printf("%s Push Descriptors and Descriptor Update Template Extensions not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub1(m_device, {dsl_binding}); const VkDescriptorSetLayoutObj ds_layout_ub_push(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); const VkPipelineLayoutObj pipeline_layout(m_device, {{&ds_layout_ub, &ds_layout_ub1, &ds_layout_ub_push}}); PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR = (PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR"); ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr); PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR = (PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR"); ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr); VkDescriptorUpdateTemplateEntry entries = {0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, sizeof(VkBuffer)}; VkDescriptorUpdateTemplateCreateInfo create_info = {}; create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO; create_info.pNext = nullptr; create_info.flags = 0; create_info.descriptorUpdateEntryCount = 1; create_info.pDescriptorUpdateEntries = &entries; auto do_test = [&](std::string err) { VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err); if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) { vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr); } m_errorMonitor->VerifyFound(); }; // Descriptor set type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET; // descriptorSetLayout is NULL do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350"); // Push descriptor type template create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR; create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; create_info.pipelineLayout = pipeline_layout.handle(); create_info.set = 2; // Bad bindpoint -- force fuzz the bind point memset(&create_info.pipelineBindPoint, 0xFE, sizeof(create_info.pipelineBindPoint)); do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351"); create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE; // Bad pipeline layout create_info.pipelineLayout = VK_NULL_HANDLE; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352"); create_info.pipelineLayout = pipeline_layout.handle(); // Wrong set # create_info.set = 0; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); // Invalid set # create_info.set = 42; do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353"); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, PushDescriptorNullDstSetTest) { TEST_DESCRIPTION("Use null dstSet in CmdPushDescriptorSetKHR"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Now use the descriptor layout to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout}); static const float vbo_data[3] = {1.f, 0.f, 1.f}; VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data); VkDescriptorBufferInfo buff_info; buff_info.buffer = vbo.handle(); buff_info.offset = 0; buff_info.range = sizeof(vbo_data); VkWriteDescriptorSet descriptor_write = {}; descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstBinding = 2; descriptor_write.descriptorCount = 1; descriptor_write.pTexelBufferView = nullptr; descriptor_write.pBufferInfo = &buff_info; descriptor_write.pImageInfo = nullptr; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_write.dstSet = 0; // Should not cause a validation error // Find address of extension call and make the call PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR = (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR"); assert(vkCmdPushDescriptorSetKHR != nullptr); m_commandBuffer->begin(); vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptor_write); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, PushDescriptorUnboundSetTest) { TEST_DESCRIPTION("Ensure that no validation errors are produced for not bound push descriptor sets"); VkResult err; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitViewport()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->ExpectSuccess(); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); // Create descriptor set layout VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); // Create push descriptor set layout const VkDescriptorSetLayoutObj push_ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); // Allocate descriptor set VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.descriptorPool = ds_pool; alloc_info.descriptorSetCount = 1; alloc_info.pSetLayouts = &ds_layout.handle(); VkDescriptorSet descriptor_set; err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set); ASSERT_VK_SUCCESS(err); // Now use the descriptor layouts to create a pipeline layout const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout}); // Create PSO char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=2) uniform foo1 { float x; } bar1;\n" "layout(set=1) layout(binding=2) uniform foo2 { float y; } bar2;\n" "void main(){\n" " x = vec4(bar1.x) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); static const float bo_data[1] = {1.f}; VkConstantBufferObj buffer(m_device, sizeof(bo_data), (const void *)&bo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT); // Update 
    // descriptor set
    VkDescriptorBufferInfo buff_info;
    buff_info.buffer = buffer.handle();
    buff_info.offset = 0;
    buff_info.range = sizeof(bo_data);
    VkWriteDescriptorSet descriptor_write = {};
    descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    descriptor_write.dstBinding = 2;
    descriptor_write.descriptorCount = 1;
    descriptor_write.pTexelBufferView = nullptr;
    descriptor_write.pBufferInfo = &buff_info;
    descriptor_write.pImageInfo = nullptr;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    descriptor_write.dstSet = descriptor_set;
    vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
        (PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
    assert(vkCmdPushDescriptorSetKHR != nullptr);

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());

    // Push descriptors and bind descriptor set
    vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
                              &descriptor_write);
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1,
                            &descriptor_set, 0, NULL);

    // No errors should be generated.
    vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
    m_errorMonitor->VerifyNotFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}

// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, TestAliasedMemoryTracking) {
    VkResult err;
    bool pass;

    TEST_DESCRIPTION(
        "Create a buffer, allocate memory, bind memory, destroy the buffer, create an image, and bind the same memory to it");

    m_errorMonitor->ExpectSuccess();

    ASSERT_NO_FATAL_FAILURE(Init());

    VkBuffer buffer;
    VkImage image;
    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;

    VkBufferCreateInfo buf_info = {};
    buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buf_info.pNext = NULL;
    buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buf_info.size = 256;
    buf_info.queueFamilyIndexCount = 0;
    buf_info.pQueueFamilyIndices = NULL;
    buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    buf_info.flags = 0;
    err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
    ASSERT_VK_SUCCESS(err);

    vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;
    // Ensure memory is big enough for both bindings
    alloc_info.allocationSize = 0x10000;
    pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
    if (!pass) {
        printf("%s Failed to allocate memory.\n", kSkipPrefix);
        vkDestroyBuffer(m_device->device(), buffer, NULL);
        return;
    }
    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    uint8_t *pData;
    err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    // Note: memset truncates its fill value to a single byte, so this writes 0xDE throughout.
    memset(pData, 0xCADECADE, static_cast<size_t>(mem_reqs.size));
    vkUnmapMemory(m_device->device(), mem);

    err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
    ASSERT_VK_SUCCESS(err);

    // NOW, destroy the buffer. Obviously, the resource no longer occupies this
    // memory. In fact, it was never used by the GPU.
    // Just to be sure, wait for idle.
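    // A note on the aliasing rule this test exercises (summary, not spec text
    // verbatim): memory may be rebound to a new resource once the old resource is
    // destroyed and the GPU can no longer touch it, which is what the destroy plus
    // vkDeviceWaitIdle pair below guarantees:
    //
    //     vkDestroyBuffer(dev, buf, nullptr);   // old alias goes away
    //     vkDeviceWaitIdle(dev);                // no in-flight GPU access remains
    //     vkBindImageMemory(dev, img, mem, 0);  // same VkDeviceMemory, new resource
    //
    // The layers track such aliases and must stay quiet on this sequence.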
vkDestroyBuffer(m_device->device(), buffer, NULL); vkDeviceWaitIdle(m_device->device()); // Use optimal as some platforms report linear support but then fail image creation VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL; VkImageFormatProperties image_format_properties; vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, image_tiling, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &image_format_properties); if (image_format_properties.maxExtent.width == 0) { printf("%s Image format not supported; skipped.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); return; } VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = image_tiling; image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.queueFamilyIndexCount = 0; image_create_info.pQueueFamilyIndices = NULL; image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; image_create_info.flags = 0; /* Create a mappable image. It will be the texture if linear images are OK * to be textures or it will be the staging image if they are not. */ err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = 0; mem_alloc.memoryTypeIndex = 0; mem_alloc.allocationSize = mem_reqs.size; pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); return; } // VALIDATION FAILURE: err = vkBindImageMemory(m_device->device(), image, mem, 0); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkFreeMemory(m_device->device(), mem, NULL); vkDestroyImage(m_device->device(), image, NULL); } // This is a positive test. No failures are expected. 
TEST_F(VkPositiveLayerTest, TestDestroyFreeNullHandles) { VkResult err; TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); vkDestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyDevice(VK_NULL_HANDLE, NULL); vkDestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImage(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyInstance(VK_NULL_HANDLE, NULL); vkDestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySampler(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL); vkDestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffers[3] = {}; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]); vkFreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers); vkDestroyCommandPool(m_device->device(), command_pool, NULL); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = {}; ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; ds_pool_ci.pNext = NULL; ds_pool_ci.maxSets = 1; ds_pool_ci.poolSizeCount = 1; ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 2; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}); VkDescriptorSet descriptor_sets[3] = {}; VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; alloc_info.descriptorSetCount = 1; 
alloc_info.descriptorPool = ds_pool; alloc_info.pSetLayouts = &ds_layout.handle(); err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]); ASSERT_VK_SUCCESS(err); vkFreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets); vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL); vkFreeMemory(m_device->device(), VK_NULL_HANDLE, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, QueueSubmitSemaphoresAndLayoutTracking) { TEST_DESCRIPTION("Submit multiple command buffers with chained semaphore signals and layout transitions"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkCommandBuffer cmd_bufs[4]; VkCommandBufferAllocateInfo alloc_info; alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; alloc_info.pNext = NULL; alloc_info.commandBufferCount = 4; alloc_info.commandPool = m_commandPool->handle(); alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs); VkImageObj image(m_device); image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT), VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkCommandBufferBeginInfo cb_binfo; cb_binfo.pNext = NULL; cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; cb_binfo.pInheritanceInfo = VK_NULL_HANDLE; cb_binfo.flags = 0; // Use 4 command buffers, each with an image layout transition, ColorAO->General->ColorAO->TransferSrc->TransferDst vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = image.handle(); img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; vkCmdPipelineBarrier(cmd_bufs[0], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[0]); vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[1], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[1]); vkBeginCommandBuffer(cmd_bufs[2], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[2], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[2]); vkBeginCommandBuffer(cmd_bufs[3], &cb_binfo); img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; vkCmdPipelineBarrier(cmd_bufs[3], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1, &img_barrier); vkEndCommandBuffer(cmd_bufs[3]); 
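    // For clarity, the three submits below form a single dependency chain through
    // binary semaphores (a sketch of the intended ordering, not additional API calls):
    //
    //   submit 0: cmd_bufs[0]                     -> signals semaphore1
    //   submit 1: waits semaphore1 -> cmd_bufs[1] -> signals semaphore2
    //   submit 2: waits semaphore2 -> cmd_bufs[2], cmd_bufs[3]
    //
    // Each wait entry requires a matching pWaitDstStageMask element; this test uses
    // VK_PIPELINE_STAGE_ALL_COMMANDS_BIT as the conservative choice. The point of
    // the chain is that the validation layers must track the image's layout across
    // queue submissions in submission order, not in recording order.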
    // Submit 4 command buffers in 3 submits, with submits 2 and 3 waiting for semaphores from submits 1 and 2
    VkSemaphore semaphore1, semaphore2;
    VkSemaphoreCreateInfo semaphore_create_info{};
    semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore1);
    vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2);

    VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};

    VkSubmitInfo submit_info[3];
    submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info[0].pNext = nullptr;
    submit_info[0].commandBufferCount = 1;
    submit_info[0].pCommandBuffers = &cmd_bufs[0];
    submit_info[0].signalSemaphoreCount = 1;
    submit_info[0].pSignalSemaphores = &semaphore1;
    submit_info[0].waitSemaphoreCount = 0;
    submit_info[0].pWaitSemaphores = nullptr;
    submit_info[0].pWaitDstStageMask = flags;

    submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info[1].pNext = nullptr;
    submit_info[1].commandBufferCount = 1;
    submit_info[1].pCommandBuffers = &cmd_bufs[1];
    submit_info[1].waitSemaphoreCount = 1;
    submit_info[1].pWaitSemaphores = &semaphore1;
    submit_info[1].signalSemaphoreCount = 1;
    submit_info[1].pSignalSemaphores = &semaphore2;
    submit_info[1].pWaitDstStageMask = flags;

    submit_info[2].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info[2].pNext = nullptr;
    submit_info[2].commandBufferCount = 2;
    submit_info[2].pCommandBuffers = &cmd_bufs[2];
    submit_info[2].waitSemaphoreCount = 1;
    submit_info[2].pWaitSemaphores = &semaphore2;
    submit_info[2].signalSemaphoreCount = 0;
    submit_info[2].pSignalSemaphores = nullptr;
    submit_info[2].pWaitDstStageMask = flags;

    vkQueueSubmit(m_device->m_queue, 3, submit_info, VK_NULL_HANDLE);
    vkQueueWaitIdle(m_device->m_queue);

    vkDestroySemaphore(m_device->device(), semaphore1, NULL);
    vkDestroySemaphore(m_device->device(), semaphore2, NULL);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, DynamicOffsetWithInactiveBinding) {
    // Create a descriptorSet w/ dynamic descriptors where 1 binding is inactive
    // We previously had a bug where dynamic offset of inactive bindings was still being used
    VkResult err;
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    OneOffDescriptorSet ds(m_device, {
                                         {2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                         {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
                                     });

    const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});

    // Create two buffers to update the descriptors with
    // The first will be 2k and used for bindings 0 & 1, the second is 1k for binding 2
    uint32_t qfi = 0;
    VkBufferCreateInfo buffCI = {};
    buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    buffCI.size = 2048;
    buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    buffCI.queueFamilyIndexCount = 1;
    buffCI.pQueueFamilyIndices = &qfi;

    VkBuffer dyub1;
    err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub1);
    ASSERT_VK_SUCCESS(err);
    // buffer2
    buffCI.size = 1024;
    VkBuffer dyub2;
    err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub2);
    ASSERT_VK_SUCCESS(err);

    // Allocate memory and bind to buffers
    VkMemoryAllocateInfo mem_alloc[2] = {};
    mem_alloc[0].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    mem_alloc[0].pNext = NULL;
    mem_alloc[0].memoryTypeIndex = 0;
    mem_alloc[1].sType =
VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc[1].pNext = NULL; mem_alloc[1].memoryTypeIndex = 0; VkMemoryRequirements mem_reqs1; vkGetBufferMemoryRequirements(m_device->device(), dyub1, &mem_reqs1); VkMemoryRequirements mem_reqs2; vkGetBufferMemoryRequirements(m_device->device(), dyub2, &mem_reqs2); mem_alloc[0].allocationSize = mem_reqs1.size; bool pass = m_device->phy().set_memory_type(mem_reqs1.memoryTypeBits, &mem_alloc[0], 0); mem_alloc[1].allocationSize = mem_reqs2.size; pass &= m_device->phy().set_memory_type(mem_reqs2.memoryTypeBits, &mem_alloc[1], 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), dyub1, NULL); vkDestroyBuffer(m_device->device(), dyub2, NULL); return; } VkDeviceMemory mem1; err = vkAllocateMemory(m_device->device(), &mem_alloc[0], NULL, &mem1); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub1, mem1, 0); ASSERT_VK_SUCCESS(err); VkDeviceMemory mem2; err = vkAllocateMemory(m_device->device(), &mem_alloc[1], NULL, &mem2); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), dyub2, mem2, 0); ASSERT_VK_SUCCESS(err); // Update descriptors const uint32_t BINDING_COUNT = 3; VkDescriptorBufferInfo buff_info[BINDING_COUNT] = {}; buff_info[0].buffer = dyub1; buff_info[0].offset = 0; buff_info[0].range = 256; buff_info[1].buffer = dyub1; buff_info[1].offset = 256; buff_info[1].range = 512; buff_info[2].buffer = dyub2; buff_info[2].offset = 0; buff_info[2].range = 512; VkWriteDescriptorSet descriptor_write; memset(&descriptor_write, 0, sizeof(descriptor_write)); descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = BINDING_COUNT; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; descriptor_write.pBufferInfo = buff_info; vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Create PSO to be used for draw-time errors below char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 x;\n" "layout(set=0) layout(binding=0) uniform foo1 { int x; int y; } bar1;\n" "layout(set=0) layout(binding=2) uniform foo2 { int x; int y; } bar2;\n" "void main(){\n" " x = vec4(bar1.y) + vec4(bar2.y);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.SetViewport(m_viewports); pipe.SetScissor(m_scissors); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass()); vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle()); // This update should succeed, but offset of inactive binding 1 oversteps binding 2 buffer size // we used to have a bug in this case. 
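    // Arithmetic behind the comment above (values taken from this test, for reference):
    // the effective range checked for a dynamic uniform buffer is
    // pBufferInfo->offset + dynamicOffset + range. Binding 1 is backed by dyub1
    // (2048 bytes) at offset 256 with range 512, so its 1024-byte dynamic offset is
    // in bounds there (256 + 1024 + 512 = 1792 <= 2048). The old bug was applying
    // that 1024-byte offset against the 1024-byte dyub2 buffer backing binding 2,
    // which would incorrectly appear out of bounds even though binding 1 is never
    // used by the shader.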
    uint32_t dyn_off[BINDING_COUNT] = {0, 1024, 256};
    vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
                            BINDING_COUNT, dyn_off);
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_errorMonitor->VerifyNotFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
    vkDestroyBuffer(m_device->device(), dyub1, NULL);
    vkDestroyBuffer(m_device->device(), dyub2, NULL);
    vkFreeMemory(m_device->device(), mem1, NULL);
    vkFreeMemory(m_device->device(), mem2, NULL);
}

TEST_F(VkPositiveLayerTest, NonCoherentMemoryMapping) {
    TEST_DESCRIPTION(
        "Ensure that validation's handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access "
        "violations");
    VkResult err;
    uint8_t *pData;
    ASSERT_NO_FATAL_FAILURE(Init());

    VkDeviceMemory mem;
    VkMemoryRequirements mem_reqs;
    mem_reqs.memoryTypeBits = 0xFFFFFFFF;
    const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.pNext = NULL;
    alloc_info.memoryTypeIndex = 0;

    static const VkDeviceSize allocation_size = 32 * atom_size;
    alloc_info.allocationSize = allocation_size;

    // Find a memory configuration WITHOUT a COHERENT bit, otherwise exit
    bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
    if (!pass) {
        pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info,
                                               VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                               VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
        if (!pass) {
            pass = m_device->phy().set_memory_type(
                mem_reqs.memoryTypeBits, &alloc_info,
                VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
            if (!pass) {
                printf("%s Couldn't find a memory type without a COHERENT bit.\n", kSkipPrefix);
                return;
            }
        }
    }

    err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
    ASSERT_VK_SUCCESS(err);

    // Map/Flush/Invalidate using WHOLE_SIZE and zero offsets and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    VkMappedMemoryRange mmr = {};
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 0;
    mmr.size = VK_WHOLE_SIZE;
    err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vkUnmapMemory(m_device->device(), mem);

    // Map/Flush/Invalidate using WHOLE_SIZE and an offset and entire mapped range
    m_errorMonitor->ExpectSuccess();
    err = vkMapMemory(m_device->device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
    mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    mmr.memory = mem;
    mmr.offset = 6 * atom_size;
    mmr.size = VK_WHOLE_SIZE;
    err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
    ASSERT_VK_SUCCESS(err);
    m_errorMonitor->VerifyNotFound();
    vkUnmapMemory(m_device->device(), mem);

    // Map with offset and size
    // Flush/Invalidate subrange of mapped area with offset and size
    m_errorMonitor->ExpectSuccess();
    err = vkMapMemory(m_device->device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData);
    ASSERT_VK_SUCCESS(err);
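    // Reminder of the rule being exercised: for non-coherent memory,
    // VkMappedMemoryRange::offset must be a multiple of nonCoherentAtomSize, and
    // size must be a multiple of it, reach the end of the mapping/allocation, or be
    // VK_WHOLE_SIZE. A minimal sketch of the rounding a caller would otherwise do
    // (hypothetical helpers, not used by this test):
    //
    //   VkDeviceSize AlignDown(VkDeviceSize v, VkDeviceSize a) { return v - (v % a); }
    //   VkDeviceSize AlignUp(VkDeviceSize v, VkDeviceSize a) { return AlignDown(v + a - 1, a); }
    //
    // Every offset and size in this test is therefore expressed in atom_size units.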
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = 4 * atom_size; mmr.size = 2 * atom_size; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); // Map without offset and flush WHOLE_SIZE with two separate offsets m_errorMonitor->ExpectSuccess(); err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData); ASSERT_VK_SUCCESS(err); mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; mmr.memory = mem; mmr.offset = allocation_size - (4 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); mmr.offset = allocation_size - (6 * atom_size); mmr.size = VK_WHOLE_SIZE; err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); vkUnmapMemory(m_device->device(), mem); vkFreeMemory(m_device->device(), mem, NULL); } // This is a positive test. We used to expect error in this case but spec now allows it TEST_F(VkPositiveLayerTest, ResetUnsignaledFence) { m_errorMonitor->ExpectSuccess(); vk_testing::Fence testFence; VkFenceCreateInfo fenceInfo = {}; fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fenceInfo.pNext = NULL; ASSERT_NO_FATAL_FAILURE(Init()); testFence.init(*m_device, fenceInfo); VkFence fences[1] = {testFence.handle()}; VkResult result = vkResetFences(m_device->device(), 1, fences); ASSERT_VK_SUCCESS(result); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CommandBufferSimultaneousUseSync) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; // Record (empty!) command buffer that can be submitted multiple times // simultaneously. VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr}; m_commandBuffer->begin(&cbbi); m_commandBuffer->end(); VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s1, s2; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s1); ASSERT_VK_SUCCESS(err); err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s2); ASSERT_VK_SUCCESS(err); // Submit CB once signaling s1, with fence so we can roll forward to its retirement. VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &m_commandBuffer->handle(), 1, &s1}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); // Submit CB again, signaling s2. si.pSignalSemaphores = &s2; err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Wait for fence. err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); // CB is still in flight from second submission, but semaphore s1 is no // longer in flight. delete it. 
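    // (Rationale: vkWaitForFences on the first submission's fence proves that
    // submission retired; s1's single signal/wait pair completed with it, so the
    // semaphore may be destroyed even though the command buffer is still executing
    // on behalf of the second submission.)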
    vkDestroySemaphore(m_device->device(), s1, nullptr);
    m_errorMonitor->VerifyNotFound();

    // Force device idle and clean up remaining objects
    vkDeviceWaitIdle(m_device->device());
    vkDestroySemaphore(m_device->device(), s2, nullptr);
    vkDestroyFence(m_device->device(), fence, nullptr);
}

TEST_F(VkPositiveLayerTest, FenceCreateSignaledWaitHandling) {
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());
    VkResult err;

    // A fence created signaled
    VkFenceCreateInfo fci1 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT};
    VkFence f1;
    err = vkCreateFence(m_device->device(), &fci1, nullptr, &f1);
    ASSERT_VK_SUCCESS(err);

    // A fence created not signaled
    VkFenceCreateInfo fci2 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
    VkFence f2;
    err = vkCreateFence(m_device->device(), &fci2, nullptr, &f2);
    ASSERT_VK_SUCCESS(err);

    // Submit the unsignaled fence
    VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 0, nullptr, 0, nullptr};
    err = vkQueueSubmit(m_device->m_queue, 1, &si, f2);
    ASSERT_VK_SUCCESS(err);

    // Wait on both fences, with signaled first.
    VkFence fences[] = {f1, f2};
    vkWaitForFences(m_device->device(), 2, fences, VK_TRUE, UINT64_MAX);

    // Should have both retired!
    vkDestroyFence(m_device->device(), f1, nullptr);
    vkDestroyFence(m_device->device(), f2, nullptr);

    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkPositiveLayerTest, CreateImageViewFollowsParameterCompatibilityRequirements) {
    TEST_DESCRIPTION("Verify that creating an ImageView with valid usage does not generate validation errors.");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->ExpectSuccess();

    VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
                                 nullptr,
                                 VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
                                 VK_IMAGE_TYPE_2D,
                                 VK_FORMAT_R8G8B8A8_UNORM,
                                 {128, 128, 1},
                                 1,
                                 1,
                                 VK_SAMPLE_COUNT_1_BIT,
                                 VK_IMAGE_TILING_OPTIMAL,
                                 VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
                                 VK_SHARING_MODE_EXCLUSIVE,
                                 0,
                                 nullptr,
                                 VK_IMAGE_LAYOUT_UNDEFINED};
    VkImageObj image(m_device);
    image.init(&imgInfo);
    ASSERT_TRUE(image.initialized());

    VkImageView imageView;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.baseArrayLayer = 0;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
    m_errorMonitor->VerifyNotFound();
    vkDestroyImageView(m_device->device(), imageView, NULL);
}

TEST_F(VkPositiveLayerTest, ValidUsage) {
    TEST_DESCRIPTION("Verify that creating an image view from an image with valid usage doesn't generate validation errors");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_errorMonitor->ExpectSuccess();
    // Verify that we can create a view with usage INPUT_ATTACHMENT
    VkImageObj image(m_device);
    image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView imageView;
    VkImageViewCreateInfo ivci = {};
    ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
    ivci.image = image.handle();
    ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
    ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
    ivci.subresourceRange.layerCount = 1;
    ivci.subresourceRange.baseMipLevel = 0;
    ivci.subresourceRange.levelCount = 1;
    ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;

    vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
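    // No errors expected: INPUT_ATTACHMENT is a valid usage for a color view of
    // this format, so view creation should pass validation cleanly.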
m_errorMonitor->VerifyNotFound(); vkDestroyImageView(m_device->device(), imageView, NULL); } // This is a positive test. No failures are expected. TEST_F(VkPositiveLayerTest, BindSparse) { TEST_DESCRIPTION("Bind 2 memory ranges to one image using vkQueueBindSparse, destroy the image and then free the memory"); ASSERT_NO_FATAL_FAILURE(Init()); auto index = m_device->graphics_queue_node_index_; if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) { printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix); return; } if (!m_device->phy().features().sparseBinding) { printf("%s Device does not support sparse bindings.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkImage image; VkImageCreateInfo image_create_info = {}; image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; image_create_info.pNext = NULL; image_create_info.imageType = VK_IMAGE_TYPE_2D; image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM; image_create_info.extent.width = 64; image_create_info.extent.height = 64; image_create_info.extent.depth = 1; image_create_info.mipLevels = 1; image_create_info.arrayLayers = 1; image_create_info.samples = VK_SAMPLE_COUNT_1_BIT; image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL; image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT; VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; VkDeviceMemory memory_one, memory_two; bool pass; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = NULL; memory_info.allocationSize = 0; memory_info.memoryTypeIndex = 0; vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); // Find an image big enough to allow sparse mapping of 2 memory regions // Increase the image size until it is at least twice the // size of the required alignment, to ensure we can bind both // allocated memory blocks to the image on aligned offsets. 
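    // Sketch of the layout the loop below establishes (offsets in bytes, where
    // alignment is the memory_reqs.alignment reported for the image):
    //
    //   resourceOffset [0, alignment)           <- memory_one
    //   resourceOffset [alignment, 2*alignment) <- memory_two
    //
    // Doubling the extent until size >= 2 * alignment guarantees both opaque binds
    // land inside the image's sparse address space at validly aligned offsets.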
while (memory_reqs.size < (memory_reqs.alignment * 2)) { vkDestroyImage(m_device->device(), image, nullptr); image_create_info.extent.width *= 2; image_create_info.extent.height *= 2; err = vkCreateImage(m_device->device(), &image_create_info, nullptr, &image); ASSERT_VK_SUCCESS(err); vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs); } // Allocate 2 memory regions of minimum alignment size, bind one at 0, the other // at the end of the first memory_info.allocationSize = memory_reqs.alignment; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_one); ASSERT_VK_SUCCESS(err); err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_two); ASSERT_VK_SUCCESS(err); VkSparseMemoryBind binds[2]; binds[0].flags = 0; binds[0].memory = memory_one; binds[0].memoryOffset = 0; binds[0].resourceOffset = 0; binds[0].size = memory_info.allocationSize; binds[1].flags = 0; binds[1].memory = memory_two; binds[1].memoryOffset = 0; binds[1].resourceOffset = memory_info.allocationSize; binds[1].size = memory_info.allocationSize; VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo; opaqueBindInfo.image = image; opaqueBindInfo.bindCount = 2; opaqueBindInfo.pBinds = binds; VkFence fence = VK_NULL_HANDLE; VkBindSparseInfo bindSparseInfo = {}; bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO; bindSparseInfo.imageOpaqueBindCount = 1; bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo; vkQueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence); vkQueueWaitIdle(m_device->m_queue); vkDestroyImage(m_device->device(), image, NULL); vkFreeMemory(m_device->device(), memory_one, NULL); vkFreeMemory(m_device->device(), memory_two, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, RenderPassInitialLayoutUndefined) { TEST_DESCRIPTION( "Ensure that CmdBeginRenderPass with an attachment's initialLayout of VK_IMAGE_LAYOUT_UNDEFINED works when the command " "buffer has prior knowledge of that attachment's layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
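    // "Compatible" here means the framebuffer attachment matches the render pass:
    // same format (VK_FORMAT_R8G8B8A8_UNORM), same sample count, and an image view
    // whose usage includes COLOR_ATTACHMENT - the pieces created below.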
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_R8G8B8A8_UNORM, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which uses this renderpass twice. The // bug is triggered at the beginning of the second renderpass, when the // command buffer already has a layout recorded for the attachment. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkPositiveLayerTest, FramebufferBindingDestroyCommandPool) { TEST_DESCRIPTION( "This test should pass. Create a Framebuffer and command buffer, bind them together, then destroy command pool and " "framebuffer and verify there are no errors."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
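    // What this test relies on: destroying a command pool implicitly frees all
    // command buffers allocated from it, and the validation layers must then drop
    // the freed command buffer's reference to the framebuffer without complaint.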
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Explicitly create a command buffer to bind the FB to so that we can then // destroy the command pool in order to implicitly free command buffer VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); // Begin our cmd buffer with renderpass using our framebuffer VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdBeginRenderPass(command_buffer, &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(command_buffer); vkEndCommandBuffer(command_buffer); // Destroy command pool to implicitly free command buffer vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, RenderPassSubpassZeroTransitionsApplied) { TEST_DESCRIPTION("Ensure that CmdBeginRenderPass applies the layout transitions for the first subpass"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with one color attachment. VkAttachmentDescription attachment = {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. 
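    // (Note: the self-dependency declared above, srcSubpass == dstSubpass == 0,
    // is what makes the vkCmdPipelineBarrier recorded inside the render pass below
    // legal; without it, a barrier in subpass 0 would itself be a validation error.)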
VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a single command buffer which issues a pipeline barrier w/ // image memory barrier for the attachment. This detects the previously // missing tracking of the subpass layout by throwing a validation error // if it doesn't occur. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, nullptr, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, image.handle(), {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, DepthStencilLayoutTransitionForDepthOnlyImageview) { TEST_DESCRIPTION( "Validate that when an imageView of a depth/stencil image is used as a depth/stencil framebuffer attachment, the " "aspectMask is ignored and both depth and stencil image subresources are used."); ASSERT_NO_FATAL_FAILURE(Init()); VkFormatProperties format_properties; vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &format_properties); if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) { printf("%s Image format does not support sampling.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkAttachmentDescription attachment = {0, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkSubpassDependency dep = {0, 0, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_DEPENDENCY_BY_REGION_BIT}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep}; VkResult err; VkRenderPass rp; err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkImageObj image(m_device); image.InitNoLayout(32, 32, 1, VK_FORMAT_D32_SFLOAT_S8_UINT, 0x26, // usage VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); 
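    // Decoding the hex constants used in this test (spelled out for readability):
    //   usage 0x26     == VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
    //                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
    //   aspectMask 0x6 == VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT
    //   aspectMask 0x2 == VK_IMAGE_ASPECT_DEPTH_BIT (depth-only view, see ivci below)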
image.SetLayout(0x6, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, VK_FORMAT_D32_SFLOAT_S8_UINT, {VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A}, {0x2, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); m_commandBuffer->begin(); VkImageMemoryBarrier imb = {}; imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; imb.pNext = nullptr; imb.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; imb.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; imb.srcQueueFamilyIndex = 0; imb.dstQueueFamilyIndex = 0; imb.image = image.handle(); imb.subresourceRange.aspectMask = 0x6; imb.subresourceRange.baseMipLevel = 0; imb.subresourceRange.levelCount = 0x1; imb.subresourceRange.baseArrayLayer = 0; imb.subresourceRange.layerCount = 0x1; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyImageView(m_device->device(), view, nullptr); } TEST_F(VkPositiveLayerTest, RenderPassTransitionsAttachmentUnused) { TEST_DESCRIPTION( "Ensure that layout transitions work correctly without errors, when an attachment reference is VK_ATTACHMENT_UNUSED"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); // A renderpass with no attachments VkAttachmentReference att_ref = {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible framebuffer. VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); // Record a command buffer which just begins and ends the renderpass. The // bug manifests in BeginRenderPass. VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); vkDestroyFramebuffer(m_device->device(), fb, nullptr); vkDestroyRenderPass(m_device->device(), rp, nullptr); } // This is a positive test. No errors are expected. TEST_F(VkPositiveLayerTest, StencilLoadOp) { TEST_DESCRIPTION("Create a stencil-only attachment with a LOAD_OP set to CLEAR. 
stencil[Load|Store]Op used to be ignored."); VkResult result = VK_SUCCESS; ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties formatProps; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &formatProps); if (formatProps.maxExtent.width < 100 || formatProps.maxExtent.height < 100) { printf("%s Image format max extent is too small.\n", kSkipPrefix); return; } VkFormat depth_stencil_fmt = depth_format; m_depthStencil->Init(m_device, 100, 100, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT); VkAttachmentDescription att = {}; VkAttachmentReference ref = {}; att.format = depth_stencil_fmt; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkClearValue clear; clear.depthStencil.depth = 1.0; clear.depthStencil.stencil = 0; ref.attachment = 0; ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.flags = 0; subpass.inputAttachmentCount = 0; subpass.pInputAttachments = NULL; subpass.colorAttachmentCount = 0; subpass.pColorAttachments = NULL; subpass.pResolveAttachments = NULL; subpass.pDepthStencilAttachment = &ref; subpass.preserveAttachmentCount = 0; subpass.pPreserveAttachments = NULL; VkRenderPass rp; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = 1; rp_info.pAttachments = &att; rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; result = vkCreateRenderPass(device(), &rp_info, NULL, &rp); ASSERT_VK_SUCCESS(result); VkImageView *depthView = m_depthStencil->BindInfo(); VkFramebufferCreateInfo fb_info = {}; fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO; fb_info.pNext = NULL; fb_info.renderPass = rp; fb_info.attachmentCount = 1; fb_info.pAttachments = depthView; fb_info.width = 100; fb_info.height = 100; fb_info.layers = 1; VkFramebuffer fb; result = vkCreateFramebuffer(device(), &fb_info, NULL, &fb); ASSERT_VK_SUCCESS(result); VkRenderPassBeginInfo rpbinfo = {}; rpbinfo.clearValueCount = 1; rpbinfo.pClearValues = &clear; rpbinfo.pNext = NULL; rpbinfo.renderPass = rp; rpbinfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO; rpbinfo.renderArea.extent.width = 100; rpbinfo.renderArea.extent.height = 100; rpbinfo.renderArea.offset.x = 0; rpbinfo.renderArea.offset.y = 0; rpbinfo.framebuffer = fb; VkFenceObj fence; fence.init(*m_device, VkFenceObj::create_info()); ASSERT_TRUE(fence.initialized()); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(rpbinfo); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(fence); VkImageObj destImage(m_device); destImage.Init(100, 100, 1, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageMemoryBarrier barrier = {}; VkImageSubresourceRange range; barrier.sType = 
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT; barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL; barrier.image = m_depthStencil->handle(); range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; range.baseMipLevel = 0; range.levelCount = 1; range.baseArrayLayer = 0; range.layerCount = 1; barrier.subresourceRange = range; fence.wait(VK_TRUE, UINT64_MAX); VkCommandBufferObj cmdbuf(m_device, m_commandPool); cmdbuf.begin(); cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); barrier.srcAccessMask = 0; barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; barrier.image = destImage.handle(); barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT; cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); VkImageCopy cregion; cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.srcSubresource.mipLevel = 0; cregion.srcSubresource.baseArrayLayer = 0; cregion.srcSubresource.layerCount = 1; cregion.srcOffset.x = 0; cregion.srcOffset.y = 0; cregion.srcOffset.z = 0; cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; cregion.dstSubresource.mipLevel = 0; cregion.dstSubresource.baseArrayLayer = 0; cregion.dstSubresource.layerCount = 1; cregion.dstOffset.x = 0; cregion.dstOffset.y = 0; cregion.dstOffset.z = 0; cregion.extent.width = 100; cregion.extent.height = 100; cregion.extent.depth = 1; cmdbuf.CopyImage(m_depthStencil->handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destImage.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion); cmdbuf.end(); VkSubmitInfo submit_info; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.pNext = NULL; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = NULL; submit_info.pWaitDstStageMask = NULL; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmdbuf.handle(); submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = NULL; m_errorMonitor->ExpectSuccess(); vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); m_errorMonitor->VerifyNotFound(); vkQueueWaitIdle(m_device->m_queue); vkDestroyRenderPass(m_device->device(), rp, nullptr); vkDestroyFramebuffer(m_device->device(), fb, nullptr); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, BarrierLayoutToImageUsage) { TEST_DESCRIPTION("Ensure barriers' new and old VkImageLayout are compatible with their images' VkImageUsageFlags"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. 
Skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageMemoryBarrier img_barrier = {}; img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER; img_barrier.pNext = NULL; img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; img_barrier.subresourceRange.baseArrayLayer = 0; img_barrier.subresourceRange.baseMipLevel = 0; img_barrier.subresourceRange.layerCount = 1; img_barrier.subresourceRange.levelCount = 1; { VkImageObj img_color(m_device); img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_color.initialized()); VkImageObj img_ds1(m_device); img_ds1.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds1.initialized()); VkImageObj img_ds2(m_device); img_ds2.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_ds2.initialized()); VkImageObj img_xfer_src(m_device); img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_src.initialized()); VkImageObj img_xfer_dst(m_device); img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_xfer_dst.initialized()); VkImageObj img_sampled(m_device); img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_sampled.initialized()); VkImageObj img_input(m_device); img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(img_input.initialized()); const struct { VkImageObj &image_obj; VkImageLayout old_layout; VkImageLayout new_layout; } buffer_layouts[] = { // clang-format off {img_color, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_ds2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_sampled, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_input, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, {img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}, // clang-format on }; const uint32_t layout_count = sizeof(buffer_layouts) / sizeof(buffer_layouts[0]); m_commandBuffer->begin(); for (uint32_t i = 0; i < layout_count; ++i) { img_barrier.image = buffer_layouts[i].image_obj.handle(); const VkImageUsageFlags usage = buffer_layouts[i].image_obj.usage(); img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) ? 
                                                          (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)
                                                        : VK_IMAGE_ASPECT_COLOR_BIT;
            img_barrier.oldLayout = buffer_layouts[i].old_layout;
            img_barrier.newLayout = buffer_layouts[i].new_layout;
            vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0,
                                 nullptr, 0, nullptr, 1, &img_barrier);

            img_barrier.oldLayout = buffer_layouts[i].new_layout;
            img_barrier.newLayout = buffer_layouts[i].old_layout;
            vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0,
                                 nullptr, 0, nullptr, 1, &img_barrier);
        }
        m_commandBuffer->end();
    }
    m_errorMonitor->VerifyNotFound();
}

// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, WaitEventThenSet) {
    TEST_DESCRIPTION("Wait on an event then set it after the wait has been submitted.");

    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    VkEvent event;
    VkEventCreateInfo event_create_info{};
    event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
    vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);

    VkCommandPool command_pool;
    VkCommandPoolCreateInfo pool_create_info{};
    pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
    pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
    pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
    vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);

    VkCommandBuffer command_buffer;
    VkCommandBufferAllocateInfo command_buffer_allocate_info{};
    command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
    command_buffer_allocate_info.commandPool = command_pool;
    command_buffer_allocate_info.commandBufferCount = 1;
    command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
    vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);

    VkQueue queue = VK_NULL_HANDLE;
    vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);

    {
        VkCommandBufferBeginInfo begin_info{};
        begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
        vkBeginCommandBuffer(command_buffer, &begin_info);

        vkCmdWaitEvents(command_buffer, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0,
                        nullptr, 0, nullptr);
        vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
        vkEndCommandBuffer(command_buffer);
    }
    {
        VkSubmitInfo submit_info{};
        submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
        submit_info.commandBufferCount = 1;
        submit_info.pCommandBuffers = &command_buffer;
        submit_info.signalSemaphoreCount = 0;
        submit_info.pSignalSemaphores = nullptr;
        vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
    }
    { vkSetEvent(m_device->device(), event); }

    vkQueueWaitIdle(queue);

    vkDestroyEvent(m_device->device(), event, nullptr);
    vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer);
    vkDestroyCommandPool(m_device->device(), command_pool, NULL);

    m_errorMonitor->VerifyNotFound();
}

// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, QueryAndCopySecondaryCommandBuffers) { TEST_DESCRIPTION("Issue a query on a secondary command buffer and copy it on a primary."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPoolObj command_pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT); VkCommandBufferObj primary_buffer(m_device, &command_pool); VkCommandBufferObj secondary_buffer(m_device, &command_pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { printf("%s Failed to allocate memory.\n", kSkipPrefix); vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); VkCommandBufferInheritanceInfo hinfo = {}; hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO; hinfo.renderPass = VK_NULL_HANDLE; hinfo.subpass = 0; hinfo.framebuffer = VK_NULL_HANDLE; hinfo.occlusionQueryEnable = VK_FALSE; hinfo.queryFlags = 0; hinfo.pipelineStatistics = 0; { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; begin_info.pInheritanceInfo = &hinfo; secondary_buffer.begin(&begin_info); vkCmdResetQueryPool(secondary_buffer.handle(), query_pool, 0, 1); vkCmdWriteTimestamp(secondary_buffer.handle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); secondary_buffer.end(); primary_buffer.begin(); vkCmdExecuteCommands(primary_buffer.handle(), 1, &secondary_buffer.handle()); vkCmdCopyQueryPoolResults(primary_buffer.handle(), query_pool, 0, 1, buffer, 0, 0, 0); primary_buffer.end(); } primary_buffer.QueueCommandBuffer(); vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. 
TEST_F(VkPositiveLayerTest, QueryAndCopyMultipleCommandBuffers) { TEST_DESCRIPTION("Issue a query and copy from it on a second command buffer."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkQueryPool query_pool; VkQueryPoolCreateInfo query_pool_create_info{}; query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP; query_pool_create_info.queryCount = 1; vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); uint32_t qfi = 0; VkBufferCreateInfo buff_create_info = {}; buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buff_create_info.size = 1024; buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buff_create_info.queueFamilyIndexCount = 1; buff_create_info.pQueueFamilyIndices = &qfi; VkResult err; VkBuffer buffer; err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memReqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs); VkMemoryAllocateInfo mem_alloc = {}; mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; mem_alloc.pNext = NULL; mem_alloc.allocationSize = memReqs.size; mem_alloc.memoryTypeIndex = 0; bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0); if (!pass) { vkDestroyBuffer(m_device->device(), buffer, NULL); return; } VkDeviceMemory mem; err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, mem, 0); ASSERT_VK_SUCCESS(err); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdResetQueryPool(command_buffer[0], query_pool, 0, 1); vkCmdWriteTimestamp(command_buffer[0], VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0); vkEndCommandBuffer(command_buffer[0]); vkBeginCommandBuffer(command_buffer[1], &begin_info); vkCmdCopyQueryPoolResults(command_buffer[1], query_pool, 0, 1, buffer, 0, 0, 0); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 2; submit_info.pCommandBuffers = command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, 
VK_NULL_HANDLE); } vkQueueWaitIdle(queue); vkDestroyQueryPool(m_device->device(), query_pool, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), mem, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, ResetEventThenSet) { TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted."); ASSERT_NO_FATAL_FAILURE(Init()); VkEvent event; VkEventCreateInfo event_create_info{}; event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO; vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 1; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer, &begin_info); vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT); vkEndCommandBuffer(command_buffer); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = nullptr; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is already in use by a command buffer."); vkSetEvent(m_device->device(), event); m_errorMonitor->VerifyFound(); } vkQueueWaitIdle(queue); vkDestroyEvent(m_device->device(), event, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer); vkDestroyCommandPool(m_device->device(), command_pool, NULL); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoFencesThreeFrames) { TEST_DESCRIPTION( "Two command buffers with two separate fences are each run through a Submit & WaitForFences cycle 3 times. 
This previously " "revealed a bug so running this positive test to prevent a regression."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue); static const uint32_t NUM_OBJECTS = 2; static const uint32_t NUM_FRAMES = 3; VkCommandBuffer cmd_buffers[NUM_OBJECTS] = {}; VkFence fences[NUM_OBJECTS] = {}; VkCommandPool cmd_pool; VkCommandPoolCreateInfo cmd_pool_ci = {}; cmd_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; cmd_pool_ci.queueFamilyIndex = m_device->graphics_queue_node_index_; cmd_pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; VkResult err = vkCreateCommandPool(m_device->device(), &cmd_pool_ci, nullptr, &cmd_pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cmd_buf_info = {}; cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; cmd_buf_info.commandPool = cmd_pool; cmd_buf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; cmd_buf_info.commandBufferCount = 1; VkFenceCreateInfo fence_ci = {}; fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; fence_ci.pNext = nullptr; fence_ci.flags = 0; for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { err = vkAllocateCommandBuffers(m_device->device(), &cmd_buf_info, &cmd_buffers[i]); ASSERT_VK_SUCCESS(err); err = vkCreateFence(m_device->device(), &fence_ci, nullptr, &fences[i]); ASSERT_VK_SUCCESS(err); } for (uint32_t frame = 0; frame < NUM_FRAMES; ++frame) { for (uint32_t obj = 0; obj < NUM_OBJECTS; ++obj) { // Create empty cmd buffer VkCommandBufferBeginInfo cmdBufBeginDesc = {}; cmdBufBeginDesc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; err = vkBeginCommandBuffer(cmd_buffers[obj], &cmdBufBeginDesc); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cmd_buffers[obj]); ASSERT_VK_SUCCESS(err); VkSubmitInfo submit_info = {}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &cmd_buffers[obj]; // Submit cmd buffer and wait for fence err = vkQueueSubmit(queue, 1, &submit_info, fences[obj]); ASSERT_VK_SUCCESS(err); err = vkWaitForFences(m_device->device(), 1, &fences[obj], VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); err = vkResetFences(m_device->device(), 1, &fences[obj]); ASSERT_VK_SUCCESS(err); } } m_errorMonitor->VerifyNotFound(); vkDestroyCommandPool(m_device->device(), cmd_pool, NULL); for (uint32_t i = 0; i < NUM_OBJECTS; ++i) { vkDestroyFence(m_device->device(), fences[i], nullptr); } } // This is a positive test. No errors should be generated. 
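// Illustrative sketch, not part of the tests above or below. TwoFencesThreeFrames
// exercises the classic per-frame fence cycle: submit with a fence, wait on the fence,
// then reset it for reuse. The hypothetical helper below factors that cycle into one
// call, assuming `fence` was created unsignaled.
static VkResult SubmitAndWaitOnFence(VkDevice device, VkQueue queue, VkCommandBuffer cb, VkFence fence) {
    VkSubmitInfo submit_info = {};
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.commandBufferCount = 1;
    submit_info.pCommandBuffers = &cb;
    VkResult err = vkQueueSubmit(queue, 1, &submit_info, fence);  // fence signals when cb completes
    if (err != VK_SUCCESS) return err;
    err = vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX);  // block until the GPU is done
    if (err != VK_SUCCESS) return err;
    return vkResetFences(device, 1, &fence);  // return the fence to the unsignaled state for the next frame
}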
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWI) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues followed by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueWaitIdle(m_device->m_queue); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), 
command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWIFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by a QueueWaitIdle."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = 
&semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkQueueWaitIdle(m_device->m_queue); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceTwoWFF) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed " "by two consecutive WaitForFences calls on the same fence."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; 
submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, TwoQueuesEnsureCorrectRetirementWithWorkStolen) { ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Test requires two queues, skipping\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); VkQueue q0 = m_device->m_queue; VkQueue q1 = nullptr; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &q1); ASSERT_NE(q1, nullptr); // An (empty) command buffer. We must have work in the first submission -- // the layer treats unfenced work differently from fenced work. VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0, 0}; VkCommandPool pool; err = vkCreateCommandPool(m_device->device(), &cpci, nullptr, &pool); ASSERT_VK_SUCCESS(err); VkCommandBufferAllocateInfo cbai = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1}; VkCommandBuffer cb; err = vkAllocateCommandBuffers(m_device->device(), &cbai, &cb); ASSERT_VK_SUCCESS(err); VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr}; err = vkBeginCommandBuffer(cb, &cbbi); ASSERT_VK_SUCCESS(err); err = vkEndCommandBuffer(cb); ASSERT_VK_SUCCESS(err); // A semaphore VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore s; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s); ASSERT_VK_SUCCESS(err); // First submission, to q0 VkSubmitInfo s0 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &cb, 1, &s}; err = vkQueueSubmit(q0, 1, &s0, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Second submission, to q1, waiting on s VkFlags waitmask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // doesn't really matter what this value is. VkSubmitInfo s1 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &s, &waitmask, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(q1, 1, &s1, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); // Wait for q0 idle err = vkQueueWaitIdle(q0); ASSERT_VK_SUCCESS(err); // Command buffer should have been completed (it was on q0); free it back to the pool. vkFreeCommandBuffers(m_device->device(), pool, 1, &cb); m_errorMonitor->VerifyNotFound(); // Force device completely idle and clean up resources vkDeviceWaitIdle(m_device->device()); vkDestroyCommandPool(m_device->device(), pool, nullptr); vkDestroySemaphore(m_device->device(), s, nullptr); } // This is a positive test. No errors should be generated.
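// Illustrative sketch, not part of the tests above or below. The cross-queue tests in
// this group all follow the same pattern: a submission on one queue signals a semaphore,
// and a submission on a second queue waits on it before executing. A hypothetical helper
// capturing just that handoff:
static void SignalThenWaitAcrossQueues(VkQueue signal_queue, VkQueue wait_queue, VkCommandBuffer cb, VkSemaphore sem) {
    VkSubmitInfo signal_submit = {};
    signal_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    signal_submit.commandBufferCount = 1;
    signal_submit.pCommandBuffers = &cb;
    signal_submit.signalSemaphoreCount = 1;
    signal_submit.pSignalSemaphores = &sem;  // signaled once cb's work finishes on signal_queue
    vkQueueSubmit(signal_queue, 1, &signal_submit, VK_NULL_HANDLE);

    VkPipelineStageFlags wait_stage = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
    VkSubmitInfo wait_submit = {};
    wait_submit.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    wait_submit.waitSemaphoreCount = 1;
    wait_submit.pWaitSemaphores = &sem;           // wait_queue blocks here until the signal
    wait_submit.pWaitDstStageMask = &wait_stage;  // stages on wait_queue that must wait
    vkQueueSubmit(wait_queue, 1, &wait_submit, VK_NULL_HANDLE);
}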
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence, " "followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) { printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); VkQueue queue = VK_NULL_HANDLE; vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } 
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueWithSemaphoreAndOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, sharing a signal/wait semaphore, the second " "having a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 1; submit_info.pSignalSemaphores = &semaphore; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 1; 
submit_info.pWaitSemaphores = &semaphore; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueNullQueueSubmitWithFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, no fences, followed by a third QueueSubmit " "with NO SubmitInfos but with a fence, followed by a WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; 
submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } vkQueueSubmit(m_device->m_queue, 0, NULL, fence); VkResult err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueOneFence) { TEST_DESCRIPTION( "Two command buffers, each in a separate QueueSubmit call on the same queue, the second having a fence, followed by a " "WaitForFences call."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[0]; submit_info.signalSemaphoreCount = 0; submit_info.pSignalSemaphores = VK_NULL_HANDLE; vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE); } { VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; VkSubmitInfo submit_info{}; submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info.commandBufferCount = 1; submit_info.pCommandBuffers = &command_buffer[1]; submit_info.waitSemaphoreCount = 0; submit_info.pWaitSemaphores = VK_NULL_HANDLE; submit_info.pWaitDstStageMask = flags; vkQueueSubmit(m_device->m_queue, 1, &submit_info, 
fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); m_errorMonitor->VerifyNotFound(); } // This is a positive test. No errors should be generated. TEST_F(VkPositiveLayerTest, TwoSubmitInfosWithSemaphoreOneQueueSubmitsOneFence) { TEST_DESCRIPTION( "Two command buffers each in a separate SubmitInfo sent in a single QueueSubmit call followed by a WaitForFences call."); ASSERT_NO_FATAL_FAILURE(Init()); m_errorMonitor->ExpectSuccess(); VkFence fence; VkFenceCreateInfo fence_create_info{}; fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO; vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence); VkSemaphore semaphore; VkSemaphoreCreateInfo semaphore_create_info{}; semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO; vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore); VkCommandPool command_pool; VkCommandPoolCreateInfo pool_create_info{}; pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO; pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_; pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT; vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool); VkCommandBuffer command_buffer[2]; VkCommandBufferAllocateInfo command_buffer_allocate_info{}; command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO; command_buffer_allocate_info.commandPool = command_pool; command_buffer_allocate_info.commandBufferCount = 2; command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY; vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer); { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[0], &begin_info); vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 0, nullptr); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[0], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[0]); } { VkCommandBufferBeginInfo begin_info{}; begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO; vkBeginCommandBuffer(command_buffer[1], &begin_info); VkViewport viewport{}; viewport.maxDepth = 1.0f; viewport.minDepth = 0.0f; viewport.width = 512; viewport.height = 512; viewport.x = 0; viewport.y = 0; vkCmdSetViewport(command_buffer[1], 0, 1, &viewport); vkEndCommandBuffer(command_buffer[1]); } { VkSubmitInfo submit_info[2]; VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT}; submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[0].pNext = NULL; submit_info[0].commandBufferCount = 1; submit_info[0].pCommandBuffers = &command_buffer[0]; submit_info[0].signalSemaphoreCount = 1; submit_info[0].pSignalSemaphores = &semaphore; submit_info[0].waitSemaphoreCount = 0; submit_info[0].pWaitSemaphores = NULL; submit_info[0].pWaitDstStageMask = 0; submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submit_info[1].pNext = NULL; submit_info[1].commandBufferCount = 1; submit_info[1].pCommandBuffers = &command_buffer[1]; submit_info[1].waitSemaphoreCount = 1; 
submit_info[1].pWaitSemaphores = &semaphore; submit_info[1].pWaitDstStageMask = flags; submit_info[1].signalSemaphoreCount = 0; submit_info[1].pSignalSemaphores = NULL; vkQueueSubmit(m_device->m_queue, 2, &submit_info[0], fence); } vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); vkDestroyFence(m_device->device(), fence, nullptr); vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]); vkDestroyCommandPool(m_device->device(), command_pool, NULL); vkDestroySemaphore(m_device->device(), semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, RenderPassSecondaryCommandBuffersMultipleTimes) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->VerifyNotFound(); vkCmdEndRenderPass(m_commandBuffer->handle()); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ValidRenderPassAttachmentLayoutWithLoadOp) { TEST_DESCRIPTION( "Positive test where we create a renderpass with an attachment that uses LOAD_OP_CLEAR, the first subpass has a valid " "layout, and a second subpass then uses a valid *READ_ONLY* layout."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkAttachmentReference attach[2] = {}; attach[0].attachment = 0; attach[0].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach[1].attachment = 0; attach[1].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkSubpassDescription subpasses[2] = {}; // First subpass clears DS attach on load subpasses[0].pDepthStencilAttachment = &attach[0]; // 2nd subpass reads in DS as input attachment subpasses[1].inputAttachmentCount = 1; subpasses[1].pInputAttachments = &attach[1]; VkAttachmentDescription attach_desc = {}; attach_desc.format = depth_format; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE; attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; VkRenderPassCreateInfo rpci = {}; rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; rpci.subpassCount = 2; rpci.pSubpasses = subpasses; // Now create RenderPass and verify no errors VkRenderPass rp; vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, NULL); } TEST_F(VkPositiveLayerTest, RenderPassDepthStencilLayoutTransition) { TEST_DESCRIPTION( "Create a render pass with depth-stencil attachment where layout transition from UNDEFINED TO DS_READ_ONLY_OPTIMAL is set " "by render pass and verify that transition has correctly occurred at queue submit time with no validation errors."); ASSERT_NO_FATAL_FAILURE(Init()); 
auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageFormatProperties format_props; vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &format_props); if (format_props.maxExtent.width < 32 || format_props.maxExtent.height < 32) { printf("%s Depth extent too small, RenderPassDepthStencilLayoutTransition skipped.\n", kSkipPrefix); return; } m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // A renderpass with one depth/stencil attachment. VkAttachmentDescription attachment = {0, depth_format, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // A compatible ds image. VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); ASSERT_TRUE(image.initialized()); VkImageViewCreateInfo ivci = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, nullptr, 0, image.handle(), VK_IMAGE_VIEW_TYPE_2D, depth_format, {VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY}, {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1}, }; VkImageView view; err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view); ASSERT_VK_SUCCESS(err); VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1}; VkFramebuffer fb; err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb); ASSERT_VK_SUCCESS(err); VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr}; m_commandBuffer->begin(); vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE); vkCmdEndRenderPass(m_commandBuffer->handle()); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(false); m_errorMonitor->VerifyNotFound(); // Cleanup vkDestroyImageView(m_device->device(), view, NULL); vkDestroyRenderPass(m_device->device(), rp, NULL); vkDestroyFramebuffer(m_device->device(), fb, NULL); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribMatrixType) { TEST_DESCRIPTION("Test that pipeline validation accepts matrices passed as vertex attributes"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in mat2x4 x;\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const 
*fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); /* expect success */ m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribArrayType) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[2]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 2; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x[2];\n" "void main(){\n" " gl_Position = x[0] + x[1];\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 2); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineAttribComponents) { TEST_DESCRIPTION( "Test that pipeline validation accepts consuming a vertex attribute through multiple vertex shader inputs, each consuming " "a different subset of the components."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDescription input_binding; memset(&input_binding, 0, sizeof(input_binding)); VkVertexInputAttributeDescription input_attribs[3]; memset(input_attribs, 0, sizeof(input_attribs)); for (int i = 0; i < 3; i++) { input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT; input_attribs[i].location = i; } char const *vsSource = "#version 450\n" "\n" "layout(location=0) in vec4 x;\n" "layout(location=1) in vec3 y1;\n" "layout(location=1, component=3) in float y2;\n" "layout(location=2) in vec4 z;\n" "void main(){\n" " gl_Position = x + vec4(y1, y2) + z;\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(&input_binding, 1); pipe.AddVertexInputAttribs(input_attribs, 3); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); 
descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineSimplePositive) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "void main(){\n" " gl_Position = vec4(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineRelaxedTypeMatch) { TEST_DESCRIPTION( "Test that pipeline validation accepts the relaxed type matching rules set out in 14.1.3: fundamental type must match, and " "producer side must have at least as many components"); m_errorMonitor->ExpectSuccess(); // VK 1.0.8 Specification, 14.1.3 "Additionally,..." block ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = "#version 450\n" "layout(location=0) out vec3 x;\n" "layout(location=1) out ivec3 y;\n" "layout(location=2) out vec3 z;\n" "void main(){\n" " gl_Position = vec4(0);\n" " x = vec3(0); y = ivec3(0); z = vec3(0);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "layout(location=0) in float x;\n" "layout(location=1) flat in int y;\n" "layout(location=2) in vec2 z;\n" "void main(){\n" " color = vec4(1 + x + y + z.x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkResult err = VK_SUCCESS; err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); ASSERT_VK_SUCCESS(err); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineTessPerVertex) { TEST_DESCRIPTION("Test that pipeline validation accepts per-vertex variables passed between the TCS and TES stages"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().tessellationShader) { printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "void main(){}\n"; char const *tcsSource = "#version 450\n" "layout(location=0) out int x[];\n" "layout(vertices=3) out;\n" "void main(){\n" " gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n" " gl_TessLevelInner[0] = 1;\n" " x[gl_InvocationID] = gl_InvocationID;\n" "}\n"; char const *tesSource = "#version 450\n" "layout(triangles, equal_spacing, cw) in;\n" "layout(location=0) in int x[];\n" "void main(){\n" " gl_Position.xyz = gl_TessCoord;\n" " gl_Position.w = x[0] + x[1] + x[2];\n" "}\n"; char const *fsSource = "#version 450\n" 
"layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this); VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; VkPipelineObj pipe(m_device); pipe.SetInputAssembly(&iasci); pipe.SetTessellation(&tsci); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&tcs); pipe.AddShader(&tes); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineGeometryInputBlockPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a user-defined interface block passed into the geometry shader. This is interesting " "because the 'extra' array level is not present on the member type, but on the block instance."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().geometryShader) { printf("%s Device does not support geometry shaders; skipped.\n", kSkipPrefix); return; } char const *vsSource = "#version 450\n" "layout(location=0) out VertexData { vec4 x; } vs_out;\n" "void main(){\n" " vs_out.x = vec4(1);\n" "}\n"; char const *gsSource = "#version 450\n" "layout(triangles) in;\n" "layout(triangle_strip, max_vertices=3) out;\n" "layout(location=0) in VertexData { vec4 x; } gs_in[];\n" "void main() {\n" " gl_Position = gs_in[0].x;\n" " EmitVertex();\n" "}\n"; char const *fsSource = "#version 450\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&gs); pipe.AddShader(&fs); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipeline64BitAttributesPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts basic use of 64bit vertex attributes. 
This is interesting because they consume " "multiple locations."); m_errorMonitor->ExpectSuccess(); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (!m_device->phy().features().shaderFloat64) { printf("%s Device does not support 64bit vertex attributes; skipped.\n", kSkipPrefix); return; } // Set 64bit format to support VTX Buffer feature PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr; PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr; // Load required functions if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) { return; } VkFormatProperties format_props; fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, &format_props); format_props.bufferFeatures |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT; fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, format_props); VkVertexInputBindingDescription input_bindings[1]; memset(input_bindings, 0, sizeof(input_bindings)); VkVertexInputAttributeDescription input_attribs[4]; memset(input_attribs, 0, sizeof(input_attribs)); input_attribs[0].location = 0; input_attribs[0].offset = 0; input_attribs[0].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[1].location = 2; input_attribs[1].offset = 32; input_attribs[1].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[2].location = 4; input_attribs[2].offset = 64; input_attribs[2].format = VK_FORMAT_R64G64B64A64_SFLOAT; input_attribs[3].location = 6; input_attribs[3].offset = 96; input_attribs[3].format = VK_FORMAT_R64G64B64A64_SFLOAT; char const *vsSource = "#version 450\n" "\n" "layout(location=0) in dmat4 x;\n" "void main(){\n" " gl_Position = vec4(x[0][0]);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(location=0) out vec4 color;\n" "void main(){\n" " color = vec4(1);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddVertexInputBindings(input_bindings, 1); pipe.AddVertexInputAttribs(input_attribs, 4); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.AppendDummy(); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, CreatePipelineInputAttachmentPositive) { TEST_DESCRIPTION("Positive test for a correctly matched input attachment"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *vsSource = "#version 450\n" "\n" "void main(){\n" " gl_Position = vec4(1);\n" "}\n"; char const *fsSource = "#version 450\n" "\n" "layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n" "layout(location=0) out vec4 color;\n" "void main() {\n" " color = subpassLoad(x);\n" "}\n"; VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); 
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // should be OK. would go wrong here if it's going to... pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyNotFound(); vkDestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkPositiveLayerTest, CreateComputePipelineMissingDescriptorUnusedPositive) { TEST_DESCRIPTION( "Test that pipeline validation accepts a compute pipeline which declares a descriptor-backed resource which is not " "provided, but the shader does not statically use it. This is interesting because it requires compute pipelines to have a " "proper descriptor use walk, which they didn't for some time."); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) buffer block { vec4 x; };\n" "void main(){\n" " // x is not used.\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, descriptorSet.GetPipelineLayout(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsSampler) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the sampler portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" 
"layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform sampler s;\n" "layout(set=0, binding=1) uniform texture2D t;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsImage) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming only the image portion of a combined image + sampler"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=1) uniform sampler s;\n" "layout(set=0, binding=2) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsBoth) { TEST_DESCRIPTION( "Test that pipeline validation accepts a shader consuming both the sampler and the image of a combined image+sampler but " "via separate variables"); m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); std::vector<VkDescriptorSetLayoutBinding> bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }; const VkDescriptorSetLayoutObj dsl(m_device, bindings); const VkPipelineLayoutObj pl(m_device, {&dsl}); char const *csSource = "#version 450\n" "\n" "layout(local_size_x=1) in;\n" "layout(set=0, binding=0) uniform texture2D t;\n" "layout(set=0, binding=0) uniform sampler s; // both binding 0!\n" "layout(set=0, binding=1) buffer block { vec4 x; };\n" "void main() {\n" " x = texture(sampler2D(t, s), vec2(0));\n" "}\n"; VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this); VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, nullptr, 0, 
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0, VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr}, pl.handle(), VK_NULL_HANDLE, -1}; VkPipeline pipe; VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe); m_errorMonitor->VerifyNotFound(); if (err == VK_SUCCESS) { vkDestroyPipeline(m_device->device(), pipe, nullptr); } } TEST_F(VkPositiveLayerTest, CreateDescriptorSetBindingWithIgnoredSamplers) { TEST_DESCRIPTION("Test that layers conditionally do ignore the pImmutableSamplers on vkCreateDescriptorSetLayout"); bool prop2_found = false; if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); prop2_found = true; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); bool push_descriptor_found = false; if (prop2_found && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); push_descriptor_found = true; } else { printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitState()); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; const void *fake_pointer = sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32); const VkSampler *hopefully_undereferencable_pointer = reinterpret_cast<const VkSampler *>(fake_pointer); // regular descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {7, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {8, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, 0, static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); if (push_descriptor_found) { // push descriptors m_errorMonitor->ExpectSuccess(); { const VkDescriptorSetLayoutBinding non_sampler_bindings[] = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 
1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, {6, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer}, }; const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR, static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings}; VkDescriptorSetLayout dsl; const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl); ASSERT_VK_SUCCESS(err); vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr); } m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, Maintenance1Tests) { TEST_DESCRIPTION("Validate various special cases for the Maintenance1_KHR extension"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); } else { printf("%s Maintenance1 Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkCommandBufferObj cmd_buf(m_device, m_commandPool); cmd_buf.begin(); // Set Negative height, should give error if Maintenance 1 is not enabled VkViewport viewport = {0, 0, 16, -16, 0, 1}; vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport); cmd_buf.end(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, DuplicateValidPNextStructures) { TEST_DESCRIPTION("Create a pNext chain containing valid structures, but with a duplicate structure type"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME); } else { printf("%s VK_NV_dedicated_allocation extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Create two pNext structures which by themselves would be valid VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {}; VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info_2 = {}; dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info.pNext = &dedicated_buffer_create_info_2; dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE; dedicated_buffer_create_info_2.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info_2.pNext = nullptr; dedicated_buffer_create_info_2.dedicatedAllocation = VK_TRUE; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = &dedicated_buffer_create_info; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; 
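// sharingMode is left at its zero default (VK_SHARING_MODE_EXCLUSIVE), in which case the spec ignores the queue
// family fields below; the part under test is the pNext chain, which carries the same structure type twice and
// must be rejected by parameter validation.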
buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "chain contains duplicate structure types"); VkBuffer buffer; vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, DedicatedAllocation) { ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkMemoryPropertyFlags mem_flags = 0; const VkDeviceSize resource_size = 1024; auto buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT); VkBufferObj buffer; buffer.init_no_mem(*m_device, buffer_info); auto buffer_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), mem_flags); auto buffer_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>(); buffer_dedicated_info.buffer = buffer.handle(); buffer_alloc_info.pNext = &buffer_dedicated_info; vk_testing::DeviceMemory dedicated_buffer_memory; dedicated_buffer_memory.init(*m_device, buffer_alloc_info); VkBufferObj wrong_buffer; wrong_buffer.init_no_mem(*m_device, buffer_info); // Bind with wrong buffer m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508"); vkBindBufferMemory(m_device->handle(), wrong_buffer.handle(), dedicated_buffer_memory.handle(), 0); m_errorMonitor->VerifyFound(); // Bind with non-zero offset (same VUID) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508"); // offset must be zero m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037"); // offset pushes us past size auto offset = buffer.memory_requirements().alignment; vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), offset); m_errorMonitor->VerifyFound(); // Bind correctly (depends on the "skip" above) m_errorMonitor->ExpectSuccess(); vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), 0); m_errorMonitor->VerifyNotFound(); // And for images... 
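// The image path mirrors the buffer path: VkMemoryDedicatedAllocateInfoKHR ties the allocation to one specific
// image, so binding a different image, or binding at a nonzero offset, must trip
// VUID-vkBindImageMemory-memory-01509, while binding the named image at offset zero must pass cleanly.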
vk_testing::Image image; vk_testing::Image wrong_image; auto image_info = vk_testing::Image::create_info(); image_info.extent.width = resource_size; image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; image_info.format = VK_FORMAT_R8G8B8A8_UNORM; image.init_no_mem(*m_device, image_info); wrong_image.init_no_mem(*m_device, image_info); auto image_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>(); image_dedicated_info.image = image.handle(); auto image_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), mem_flags); image_alloc_info.pNext = &image_dedicated_info; vk_testing::DeviceMemory dedicated_image_memory; dedicated_image_memory.init(*m_device, image_alloc_info); // Bind with wrong image m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509"); vkBindImageMemory(m_device->handle(), wrong_image.handle(), dedicated_image_memory.handle(), 0); m_errorMonitor->VerifyFound(); // Bind with non-zero offset (same VUID) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509"); // offset must be zero m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049"); // offset pushes us past size auto image_offset = image.memory_requirements().alignment; vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), image_offset); m_errorMonitor->VerifyFound(); // Bind correctly (depends on the "skip" above) m_errorMonitor->ExpectSuccess(); vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), 0); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ValidStructPNext) { TEST_DESCRIPTION("Verify that a valid pNext value is handled correctly"); // Positive test to check parameter_validation and unique_objects support for NV_dedicated_allocation ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME); } else { printf("%s VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME Extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {}; dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV; dedicated_buffer_create_info.pNext = nullptr; dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE; uint32_t queue_family_index = 0; VkBufferCreateInfo buffer_create_info = {}; buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; buffer_create_info.pNext = &dedicated_buffer_create_info; buffer_create_info.size = 1024; buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT; buffer_create_info.queueFamilyIndexCount = 1; buffer_create_info.pQueueFamilyIndices = &queue_family_index; VkBuffer buffer; VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements memory_reqs; vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs); VkDedicatedAllocationMemoryAllocateInfoNV dedicated_memory_info = {}; dedicated_memory_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV; dedicated_memory_info.pNext = nullptr; dedicated_memory_info.buffer = buffer; 
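// Per VK_NV_dedicated_allocation, at most one of image/buffer may name a resource; this allocation is dedicated
// to the buffer, so image is deliberately left as VK_NULL_HANDLE below.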
dedicated_memory_info.image = VK_NULL_HANDLE; VkMemoryAllocateInfo memory_info = {}; memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; memory_info.pNext = &dedicated_memory_info; memory_info.allocationSize = memory_reqs.size; bool pass; pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0); ASSERT_TRUE(pass); VkDeviceMemory buffer_memory; err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory); ASSERT_VK_SUCCESS(err); err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0); ASSERT_VK_SUCCESS(err); vkDestroyBuffer(m_device->device(), buffer, NULL); vkFreeMemory(m_device->device(), buffer_memory, NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, PSOPolygonModeValid) { TEST_DESCRIPTION("Verify that using a solid polygon fill mode works correctly."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Artificially disable support for non-solid fill modes features.fillModeNonSolid = false; // The sacrificial device object VkDeviceObj test_device(0, gpu(), device_extension_names, &features); VkRenderpassObj render_pass(&test_device); const VkPipelineLayoutObj pipeline_layout(&test_device); VkPipelineRasterizationStateCreateInfo rs_ci = {}; rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO; rs_ci.pNext = nullptr; rs_ci.lineWidth = 1.0f; rs_ci.rasterizerDiscardEnable = false; VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set polygonMode=FILL. No error is expected m_errorMonitor->ExpectSuccess(); { VkPipelineObj pipe(&test_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); // Set polygonMode to a good value rs_ci.polygonMode = VK_POLYGON_MODE_FILL; pipe.SetRasterization(&rs_ci); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); } m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, LongSemaphoreChain) { m_errorMonitor->ExpectSuccess(); ASSERT_NO_FATAL_FAILURE(Init()); VkResult err; std::vector<VkSemaphore> semaphores; const int chainLength = 32768; VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; for (int i = 0; i < chainLength; i++) { VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0}; VkSemaphore semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &semaphore); ASSERT_VK_SUCCESS(err); semaphores.push_back(semaphore); VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, semaphores.size() > 1 ? 1u : 0u, semaphores.size() > 1 ? 
&semaphores[semaphores.size() - 2] : nullptr, &flags, 0, nullptr, 1, &semaphores[semaphores.size() - 1]}; err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; VkFence fence; err = vkCreateFence(m_device->device(), &fci, nullptr, &fence); ASSERT_VK_SUCCESS(err); VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &semaphores.back(), &flags, 0, nullptr, 0, nullptr}; err = vkQueueSubmit(m_device->m_queue, 1, &si, fence); ASSERT_VK_SUCCESS(err); vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX); for (auto semaphore : semaphores) vkDestroySemaphore(m_device->device(), semaphore, nullptr); vkDestroyFence(m_device->device(), fence, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalSemaphore) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR; #else const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external semaphore instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external semaphore device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME); } else { printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external semaphore import and export capability VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr, handle_type}; VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR"); vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp); if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) || !(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a semaphore to export payload from VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type}; VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0}; VkSemaphore export_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore); ASSERT_VK_SUCCESS(err); // Create a semaphore to import payload into sci.pNext = nullptr; VkSemaphore import_semaphore; err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore); 
ASSERT_VK_SUCCESS(err); #ifdef _WIN32 // Export semaphore payload to an opaque handle HANDLE handle = nullptr; VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreWin32HandleKHR = (PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR"); err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreWin32HandleInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, 0, handle_type, handle, nullptr}; auto vkImportSemaphoreWin32HandleKHR = (PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR"); err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #else // Export semaphore payload to an opaque handle int fd = 0; VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type}; auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR"); err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd); ASSERT_VK_SUCCESS(err); // Import opaque handle exported above VkImportSemaphoreFdInfoKHR ihi = { VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, 0, handle_type, fd}; auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR"); err = vkImportSemaphoreFdKHR(m_device->device(), &ihi); ASSERT_VK_SUCCESS(err); #endif // Signal the exported semaphore and wait on the imported semaphore VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT; VkSubmitInfo si[] = { {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore}, {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr}, }; err = vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); if (m_device->phy().features().sparseBinding) { // Signal the imported semaphore and wait on the exported semaphore VkBindSparseInfo bi[] = { {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore}, {VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr}, }; err = vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE); ASSERT_VK_SUCCESS(err); } // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroySemaphore(m_device->device(), export_semaphore, nullptr); vkDestroySemaphore(m_device->device(), import_semaphore, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ExternalFence) { #ifdef _WIN32 const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto extension_name = 
VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external fence instance extensions if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME); m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for external fence device extensions if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) { m_device_extension_names.push_back(extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME); } else { printf("%s External fence extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); // Check for external fence import and export capability VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type}; VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr}; auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR"); vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp); if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) || !(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix); return; } VkResult err; m_errorMonitor->ExpectSuccess(); // Create a fence to export payload from VkFence export_fence; { VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type}; VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence); ASSERT_VK_SUCCESS(err); } // Create a fence to import payload into VkFence import_fence; { VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0}; err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence); ASSERT_VK_SUCCESS(err); } #ifdef _WIN32 // Export fence payload to an opaque handle HANDLE handle = nullptr; { VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceWin32HandleKHR = (PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR"); err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceWin32HandleInfoKHR ifi = { VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, 0, handle_type, handle, nullptr}; auto vkImportFenceWin32HandleKHR = (PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR"); err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #else // Export fence payload to an opaque handle int fd = 0; { VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type}; auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR"); err = 
vkGetFenceFdKHR(m_device->device(), &gfi, &fd); ASSERT_VK_SUCCESS(err); } // Import opaque handle exported above { VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, 0, handle_type, fd}; auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR"); err = vkImportFenceFdKHR(m_device->device(), &ifi); ASSERT_VK_SUCCESS(err); } #endif // Signal the exported fence and wait on the imported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence); vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &import_fence); // Signal the imported fence and wait on the exported fence vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence); vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000); vkResetFences(m_device->device(), 1, &export_fence); // Cleanup err = vkQueueWaitIdle(m_device->m_queue); ASSERT_VK_SUCCESS(err); vkDestroyFence(m_device->device(), export_fence, nullptr); vkDestroyFence(m_device->device(), import_fence, nullptr); m_errorMonitor->VerifyNotFound(); } extern "C" void *ReleaseNullFence(void *arg) { struct thread_data_struct *data = (struct thread_data_struct *)arg; for (int i = 0; i < 40000; i++) { vkDestroyFence(data->device, VK_NULL_HANDLE, NULL); if (data->bailout) { break; } } return NULL; } TEST_F(VkPositiveLayerTest, ThreadNullFenceCollision) { test_platform_thread thread; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR"); ASSERT_NO_FATAL_FAILURE(Init()); struct thread_data_struct data; data.device = m_device->device(); data.bailout = false; m_errorMonitor->SetBailout(&data.bailout); // Call vkDestroyFence of VK_NULL_HANDLE repeatedly using multiple threads. // There should be no validation error from collision of that non-object. 
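// vkDestroyFence explicitly allows fence to be VK_NULL_HANDLE (the call is then a no-op), so two threads "using"
// the null handle concurrently is not a race on any object and the threading layer must stay silent.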
test_platform_thread_create(&thread, ReleaseNullFence, (void *)&data); for (int i = 0; i < 40000; i++) { vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL); } test_platform_thread_join(thread, NULL); m_errorMonitor->SetBailout(NULL); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, ClearColorImageWithValidRange) { TEST_DESCRIPTION("Record clear color with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle();
// Try good case
{ m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); }
// Try good case with VK_REMAINING
{ m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, ClearDepthStencilWithValidRange) { TEST_DESCRIPTION("Record clear depth with a valid VkImageSubresourceRange"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto depth_format = FindSupportedDepthStencilFormat(gpu()); if (!depth_format) { printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix); return; } VkImageObj image(m_device); image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL); ASSERT_TRUE(image.create_info().arrayLayers == 1); ASSERT_TRUE(image.initialized()); const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); const VkClearDepthStencilValue clear_value = {}; m_commandBuffer->begin(); const auto cb_handle = m_commandBuffer->handle();
// Try good case
{ m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 1}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); }
// Try good case with VK_REMAINING
{ m_errorMonitor->ExpectSuccess(); VkImageSubresourceRange range = {ds_aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS}; vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range); m_errorMonitor->VerifyNotFound(); } } TEST_F(VkPositiveLayerTest, CreateGraphicsPipelineWithIgnoredPointers) { TEST_DESCRIPTION("Create Graphics Pipeline with pointers that must be ignored by layers"); ASSERT_NO_FATAL_FAILURE(Init()); m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu()); ASSERT_TRUE(m_depth_stencil_fmt != 0); m_depthStencil->Init(m_device, static_cast<int32_t>(m_width), static_cast<int32_t>(m_height), m_depth_stencil_fmt); ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo())); const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD; const uint64_t fake_address_32 = 0xCDCDCDCD; void *hopefully_undereferencable_pointer = sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32); VkShaderObj vs(m_device, "#version 450\nvoid main(){gl_Position = vec4(0.0, 0.0, 0.0, 1.0);}\n", VK_SHADER_STAGE_VERTEX_BIT, this); const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, nullptr, // pNext
0, // flags
0, nullptr, // bindings
0, nullptr // attributes
}; const VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, // pNext
0, // flags
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, VK_FALSE // primitive restart
}; const VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info_template{ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, nullptr, // pNext
0, // flags
VK_FALSE, // depthClamp
VK_FALSE, // rasterizerDiscardEnable
VK_POLYGON_MODE_FILL, VK_CULL_MODE_NONE, VK_FRONT_FACE_COUNTER_CLOCKWISE, VK_FALSE, // depthBias
0.0f, 0.0f, 0.0f, // depthBias params
1.0f // lineWidth
}; VkPipelineLayout pipeline_layout; { VkPipelineLayoutCreateInfo pipeline_layout_create_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, // pNext
0, // flags
0, nullptr, // layouts
0, nullptr // push constants
}; VkResult err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_create_info, nullptr, &pipeline_layout); ASSERT_VK_SUCCESS(err); }
// try disabled rasterizer and no tessellation
{ m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_TRUE; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, reinterpret_cast<const VkPipelineTessellationStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineViewportStateCreateInfo *>(hopefully_undereferencable_pointer), &pipeline_rasterization_state_create_info, reinterpret_cast<const VkPipelineMultisampleStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } const VkPipelineMultisampleStateCreateInfo pipeline_multisample_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_SAMPLE_COUNT_1_BIT, VK_FALSE, // sample shading 0.0f, // minSampleShading nullptr, // pSampleMask VK_FALSE, // alphaToCoverageEnable VK_FALSE // alphaToOneEnable }; // try enabled rasterizer but no subpass attachments { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; VkViewport viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}; VkRect2D scissor = {{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, &viewport, 1, &scissor}; VkRenderPass render_pass; { VkSubpassDescription subpass_desc = {}; VkRenderPassCreateInfo render_pass_create_info{ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, // pNext 0, // flags 0, nullptr, // attachments 1, &subpass_desc, 0, nullptr // subpass dependencies }; VkResult err = vkCreateRenderPass(m_device->handle(), &render_pass_create_info, nullptr, &render_pass); ASSERT_VK_SUCCESS(err); } VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{ VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer), reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer), nullptr, // dynamic states pipeline_layout, render_pass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); vkDestroyRenderPass(m_device->handle(), render_pass, nullptr); } // try dynamic viewport and scissor { m_errorMonitor->ExpectSuccess(); VkPipelineRasterizationStateCreateInfo 
pipeline_rasterization_state_create_info = pipeline_rasterization_state_create_info_template; pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE; const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 1, reinterpret_cast<const VkViewport *>(hopefully_undereferencable_pointer), 1, reinterpret_cast<const VkRect2D *>(hopefully_undereferencable_pointer)}; const VkPipelineDepthStencilStateCreateInfo pipeline_depth_stencil_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, nullptr, // pNext 0, // flags }; const VkPipelineColorBlendAttachmentState pipeline_color_blend_attachment_state = {}; const VkPipelineColorBlendStateCreateInfo pipeline_color_blend_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, nullptr, // pNext 0, // flags VK_FALSE, VK_LOGIC_OP_CLEAR, 1, &pipeline_color_blend_attachment_state, {0.0f, 0.0f, 0.0f, 0.0f}}; const VkDynamicState dynamic_states[2] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; const VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info{ VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, nullptr, // pNext 0, // flags 2, dynamic_states}; VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, nullptr, // pNext 0, // flags 1, // stageCount &vs.GetStageCreateInfo(), &pipeline_vertex_input_state_create_info, &pipeline_input_assembly_state_create_info, nullptr, &pipeline_viewport_state_create_info, &pipeline_rasterization_state_create_info, &pipeline_multisample_state_create_info, &pipeline_depth_stencil_state_create_info, &pipeline_color_blend_state_create_info, &pipeline_dynamic_state_create_info, // dynamic states pipeline_layout, m_renderPass, 0, // subpass VK_NULL_HANDLE, 0}; VkPipeline pipeline; vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vkDestroyPipeline(m_device->handle(), pipeline, nullptr); } vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, nullptr); } TEST_F(VkPositiveLayerTest, ExternalMemory) { TEST_DESCRIPTION("Perform a copy through a pair of buffers linked by external memory"); #ifdef _WIN32 const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR; #else const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME; const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR; #endif // Check for external memory instance extensions std::vector<const char *> reqd_instance_extensions = { {VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}}; for (auto extension_name : reqd_instance_extensions) { if (InstanceExtensionSupported(extension_name)) { m_instance_extension_names.push_back(extension_name); } else { printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name); return; } } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for import/export capability VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type}; VkExternalBufferPropertiesKHR ebp = 
{VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}}; auto vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vkGetInstanceProcAddr( instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR"); ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr); vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp); if (!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) || !(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) { printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix); return; } // Check if dedicated allocation is required bool dedicated_allocation = ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR; if (dedicated_allocation) { if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix); return; } } // Check for external memory device extensions if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) { m_device_extension_names.push_back(ext_mem_extension_name); m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME); } else { printf("%s External memory extension not supported, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); VkMemoryPropertyFlags mem_flags = 0; const VkDeviceSize buffer_size = 1024; // Create export and import buffers const VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR, nullptr, handle_type}; auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT); buffer_info.pNext = &external_buffer_info; VkBufferObj buffer_export; buffer_export.init_no_mem(*m_device, buffer_info); VkBufferObj buffer_import; buffer_import.init_no_mem(*m_device, buffer_info); // Allocation info auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags); // Add export allocation info to pNext chain VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, handle_type}; alloc_info.pNext = &export_info; // Add dedicated allocation info to pNext chain if required VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr, VK_NULL_HANDLE, buffer_export.handle()}; if (dedicated_allocation) { export_info.pNext = &dedicated_info; } // Allocate memory to be exported vk_testing::DeviceMemory memory_export; memory_export.init(*m_device, alloc_info); // Bind exported memory buffer_export.bind_memory(memory_export, 0); #ifdef _WIN32 // Export memory to handle auto vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR"); ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr); VkMemoryGetWin32HandleInfoKHR mghi = 
{VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr, memory_export.handle(), handle_type}; HANDLE handle; ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi, &handle)); VkImportMemoryWin32HandleInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr, handle_type, handle}; #else // Export memory to fd auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryFdKHR"); ASSERT_TRUE(vkGetMemoryFdKHR != nullptr); VkMemoryGetFdInfoKHR mgfi = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_export.handle(), handle_type}; int fd; ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi, &fd)); VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd}; #endif // Import memory alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags); alloc_info.pNext = &import_info; vk_testing::DeviceMemory memory_import; memory_import.init(*m_device, alloc_info); // Bind imported memory buffer_import.bind_memory(memory_import, 0); // Create test buffers and fill input buffer VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; VkBufferObj buffer_input; buffer_input.init_as_src_and_dst(*m_device, buffer_size, mem_prop); auto input_mem = (uint8_t *)buffer_input.memory().map(); for (uint32_t i = 0; i < buffer_size; i++) { input_mem[i] = (i & 0xFF); } buffer_input.memory().unmap(); VkBufferObj buffer_output; buffer_output.init_as_src_and_dst(*m_device, buffer_size, mem_prop); // Copy from input buffer to output buffer through the exported/imported memory m_commandBuffer->begin(); VkBufferCopy copy_info = {0, 0, buffer_size}; vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_input.handle(), buffer_export.handle(), 1, &copy_info); // Insert memory barrier to guarantee copy order VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT}; vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1, &mem_barrier, 0, nullptr, 0, nullptr); vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_import.handle(), buffer_output.handle(), 1, &copy_info); m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateRenderPass) { TEST_DESCRIPTION("Verify error messages for supported and unsupported sample counts in render pass attachments."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(); std::vector<VkAttachmentDescription> attachments; { VkAttachmentDescription att = {}; att.format = VK_FORMAT_R8G8B8A8_UNORM; att.samples = VK_SAMPLE_COUNT_1_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments.push_back(att); att.format = 
VK_FORMAT_D16_UNORM; att.samples = VK_SAMPLE_COUNT_4_BIT; att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.storeOp = VK_ATTACHMENT_STORE_OP_STORE; att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE; att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; attachments.push_back(att); } VkAttachmentReference color_ref = {}; color_ref.attachment = 0; color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference depth_ref = {}; depth_ref.attachment = 1; depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &color_ref; subpass.pDepthStencilAttachment = &depth_ref; VkRenderPassCreateInfo rp_info = {}; rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO; rp_info.attachmentCount = attachments.size(); rp_info.pAttachments = attachments.data(); rp_info.subpassCount = 1; rp_info.pSubpasses = &subpass; vkCreateRenderPass(device(), &rp_info, NULL, &m_renderPass); m_errorMonitor->VerifyNotFound(); // Expect an error message for invalid sample counts m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pColorAttachments-01506"); attachments[0].samples = VK_SAMPLE_COUNT_4_BIT; attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; { VkRenderPass render_pass; VkResult err = vkCreateRenderPass(device(), &rp_info, NULL, &render_pass); m_errorMonitor->VerifyFound(); ASSERT_NE(err, VK_SUCCESS); } } TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateGraphicsPipeline) { TEST_DESCRIPTION("Verify an error message for an incorrect graphics pipeline rasterization sample count."); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkRenderpassObj render_pass(m_device); const VkPipelineLayoutObj pipeline_layout(m_device); VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this); VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // Set a mismatched sample count VkPipelineMultisampleStateCreateInfo ms_state_ci = {}; ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO; ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); pipe.SetMSAA(&ms_state_ci); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505"); pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle()); m_errorMonitor->VerifyFound(); } TEST_F(VkPositiveLayerTest, ParameterLayerFeatures2Capture) { TEST_DESCRIPTION("Ensure parameter_validation_layer correctly captures physical device features"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } 
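// The device created below receives its features through VkPhysicalDeviceFeatures2KHR chained into
// VkDeviceCreateInfo::pNext, with pEnabledFeatures left null; the layers must capture features from the pNext
// chain exactly as they would from pEnabledFeatures, which the sampler and query-pool checks below rely on.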
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); VkResult err; m_errorMonitor->ExpectSuccess(); VkPhysicalDeviceFeatures2KHR features2; features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR; features2.pNext = nullptr; vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // We're not creating a valid m_device, but the phy wrapper is useful vk_testing::PhysicalDevice physical_device(gpu()); vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties()); // Only request creation with queuefamilies that have at least one queue std::vector<VkDeviceQueueCreateInfo> create_queue_infos; auto qci = queue_info.data(); for (uint32_t i = 0; i < queue_info.size(); ++i) { if (qci[i].queueCount) { create_queue_infos.push_back(qci[i]); } } VkDeviceCreateInfo dev_info = {}; dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; dev_info.pNext = &features2; dev_info.flags = 0; dev_info.queueCreateInfoCount = create_queue_infos.size(); dev_info.pQueueCreateInfos = create_queue_infos.data(); dev_info.enabledLayerCount = 0; dev_info.ppEnabledLayerNames = nullptr; dev_info.enabledExtensionCount = 0; dev_info.ppEnabledExtensionNames = nullptr; dev_info.pEnabledFeatures = nullptr; VkDevice device; err = vkCreateDevice(gpu(), &dev_info, nullptr, &device); ASSERT_VK_SUCCESS(err); if (features2.features.samplerAnisotropy) { // Test that the parameter layer is caching the features correctly using CreateSampler VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); // If the features were not captured correctly, this should cause an error sampler_ci.anisotropyEnable = VK_TRUE; sampler_ci.maxAnisotropy = physical_device.properties().limits.maxSamplerAnisotropy; VkSampler sampler = VK_NULL_HANDLE; err = vkCreateSampler(device, &sampler_ci, nullptr, &sampler); ASSERT_VK_SUCCESS(err); vkDestroySampler(device, sampler, nullptr); } else { printf("%s Feature samplerAnisotropy not enabled; parameter_layer check skipped.\n", kSkipPrefix); } // Verify the core validation layer has captured the physical device features by creating a a query pool. 
if (features2.features.pipelineStatisticsQuery) { VkQueryPool query_pool; VkQueryPoolCreateInfo qpci{}; qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO; qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS; qpci.queryCount = 1; err = vkCreateQueryPool(device, &qpci, nullptr, &query_pool); ASSERT_VK_SUCCESS(err); vkDestroyQueryPool(device, query_pool, nullptr); } else { printf("%s Feature pipelineStatisticsQuery not enabled; core_validation_layer check skipped.\n", kSkipPrefix); } vkDestroyDevice(device, nullptr); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, GetMemoryRequirements2) { TEST_DESCRIPTION( "Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit " "errors when objects are bound and used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for VK_KHR_get_memory_requirementes2 extensions if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Use extension to get buffer memory requirements auto vkGetBufferMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetBufferMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2KHR")); ASSERT_TRUE(vkGetBufferMemoryRequirements2KHR != nullptr); VkBufferMemoryRequirementsInfo2KHR buffer_info = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, buffer.handle()}; VkMemoryRequirements2KHR buffer_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetBufferMemoryRequirements2KHR(m_device->device(), &buffer_info, &buffer_reqs); // Allocate and bind buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_reqs.memoryRequirements, 0)); vkBindBufferMemory(m_device->device(), buffer.handle(), buffer_memory.handle(), 0); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Use extension to get image memory requirements auto vkGetImageMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetImageMemoryRequirements2KHR>( vkGetDeviceProcAddr(m_device->device(), "vkGetImageMemoryRequirements2KHR")); ASSERT_TRUE(vkGetImageMemoryRequirements2KHR != nullptr); VkImageMemoryRequirementsInfo2KHR image_info = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr, image.handle()}; VkMemoryRequirements2KHR image_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; vkGetImageMemoryRequirements2KHR(m_device->device(), &image_info, &image_reqs); // Allocate and bind image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, 
image_reqs.memoryRequirements, 0)); vkBindImageMemory(m_device->device(), image.handle(), image_memory.handle(), 0); // Now execute arbitrary commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, BindMemory2) { TEST_DESCRIPTION( "Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are " "used"); ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); // Check for VK_KHR_get_memory_requirementes2 extensions if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT); // Create a test buffer VkBufferObj buffer; buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT)); // Allocate buffer memory vk_testing::DeviceMemory buffer_memory; buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), 0)); // Bind buffer memory with extension auto vkBindBufferMemory2KHR = reinterpret_cast<PFN_vkBindBufferMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindBufferMemory2KHR")); ASSERT_TRUE(vkBindBufferMemory2KHR != nullptr); VkBindBufferMemoryInfoKHR buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, buffer.handle(), buffer_memory.handle(), 0}; vkBindBufferMemory2KHR(m_device->device(), 1, &buffer_bind_info); // Create a test image auto image_ci = vk_testing::Image::create_info(); image_ci.imageType = VK_IMAGE_TYPE_2D; image_ci.extent.width = 32; image_ci.extent.height = 32; image_ci.format = VK_FORMAT_R8G8B8A8_UNORM; image_ci.tiling = VK_IMAGE_TILING_OPTIMAL; image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT; vk_testing::Image image; image.init_no_mem(*m_device, image_ci); // Allocate image memory vk_testing::DeviceMemory image_memory; image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), 0)); // Bind image memory with extension auto vkBindImageMemory2KHR = reinterpret_cast<PFN_vkBindImageMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindImageMemory2KHR")); ASSERT_TRUE(vkBindImageMemory2KHR != nullptr); VkBindImageMemoryInfoKHR image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, image.handle(), image_memory.handle(), 0}; vkBindImageMemory2KHR(m_device->device(), 1, &image_bind_info); // Now execute arbitrary 
commands that use the test buffer and image m_commandBuffer->begin(); // Fill buffer with 0 vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0); // Transition and clear image const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT); const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, subresource_range); vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1, &barrier); const VkClearColorValue color = {}; vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range); // Submit and verify no validation errors m_commandBuffer->end(); m_commandBuffer->QueueCommandBuffer(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkPositiveLayerTest, MultiplaneImageTests) { TEST_DESCRIPTION("Positive test of multiplane image operations"); // Enable KHR multiplane req'd extensions bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION); if (mp_extensions) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); if (mp_extensions) { m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME); } else { printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkImageCreateInfo ci = {}; ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; ci.pNext = NULL; ci.flags = 0; ci.imageType = VK_IMAGE_TYPE_2D; ci.format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR; ci.tiling = VK_IMAGE_TILING_OPTIMAL; ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT; ci.extent = {128, 128, 1}; ci.mipLevels = 1; ci.arrayLayers = 1; ci.samples = VK_SAMPLE_COUNT_1_BIT; ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; // Verify format VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT; bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features); if (!supported) { printf("%s Multiplane image format not supported. 
Skipping test.\n", kSkipPrefix); return; // Assume there's low ROI on searching for different mp formats } VkImage image; ASSERT_VK_SUCCESS(vkCreateImage(device(), &ci, NULL, &image)); // Allocate & bind memory VkPhysicalDeviceMemoryProperties phys_mem_props; vkGetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props); VkMemoryRequirements mem_reqs; vkGetImageMemoryRequirements(device(), image, &mem_reqs); VkDeviceMemory mem_obj = VK_NULL_HANDLE; VkMemoryPropertyFlagBits mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; for (uint32_t type = 0; type < phys_mem_props.memoryTypeCount; type++) { if ((mem_reqs.memoryTypeBits & (1 << type)) && ((phys_mem_props.memoryTypes[type].propertyFlags & mem_props) == mem_props)) { VkMemoryAllocateInfo alloc_info = {}; alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO; alloc_info.allocationSize = mem_reqs.size; alloc_info.memoryTypeIndex = type; ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &mem_obj)); break; } } if (VK_NULL_HANDLE == mem_obj) { printf("%s Unable to allocate image memory. Skipping test.\n", kSkipPrefix); vkDestroyImage(device(), image, NULL); return; } ASSERT_VK_SUCCESS(vkBindImageMemory(device(), image, mem_obj, 0)); // Copy plane 0 to plane 2 VkImageCopy copyRegion = {}; copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR; copyRegion.srcSubresource.mipLevel = 0; copyRegion.srcSubresource.baseArrayLayer = 0; copyRegion.srcSubresource.layerCount = 1; copyRegion.srcOffset = {0, 0, 0}; copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; copyRegion.dstSubresource.mipLevel = 0; copyRegion.dstSubresource.baseArrayLayer = 0; copyRegion.dstSubresource.layerCount = 1; copyRegion.dstOffset = {0, 0, 0}; copyRegion.extent.width = 128; copyRegion.extent.height = 128; copyRegion.extent.depth = 1; m_errorMonitor->ExpectSuccess(); m_commandBuffer->begin(); m_commandBuffer->CopyImage(image, VK_IMAGE_LAYOUT_GENERAL, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion); m_commandBuffer->end(); m_errorMonitor->VerifyNotFound(); #if 0 // Copy to/from buffer VkBufferCreateInfo bci = {}; bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; bci.pNext = NULL; bci.size = 128 * 128 * 3; bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE; VkBuffer buffer; ASSERT_VK_SUCCESS(vkCreateBuffer(device(), &bci, NULL, &buffer)); VkBufferImageCopy copy_region = {}; copy_region.bufferRowLength = 128; copy_region.bufferImageHeight = 128; copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR; copy_region.imageSubresource.layerCount = 1; copy_region.imageExtent.height = 64; copy_region.imageExtent.width = 64; copy_region.imageExtent.depth = 1; m_errorMonitor->ExpectSuccess(); vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image,VK_IMAGE_LAYOUT_GENERAL, buffer, 1, &copy_region); m_errorMonitor->VerifyNotFound(); m_errorMonitor->ExpectSuccess(); copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR; vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region); m_errorMonitor->VerifyNotFound(); #endif vkFreeMemory(device(), mem_obj, NULL); vkDestroyImage(device(), image, NULL); } TEST_F(VkPositiveLayerTest, ApiVersionZero) { TEST_DESCRIPTION("Check that apiVersion = 0 is valid."); m_errorMonitor->ExpectSuccess(); app_info.apiVersion = 0U; ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor)); m_errorMonitor->VerifyNotFound(); } #if defined(ANDROID) && 
defined(VALIDATION_APK) const char *appTag = "VulkanLayerValidationTests"; static bool initialized = false; static bool active = false; // Convert Intents to argv // Ported from Hologram sample, only difference is flexible key std::vector<std::string> get_args(android_app &app, const char *intent_extra_data_key) { std::vector<std::string> args; JavaVM &vm = *app.activity->vm; JNIEnv *p_env; if (vm.AttachCurrentThread(&p_env, nullptr) != JNI_OK) return args; JNIEnv &env = *p_env; jobject activity = app.activity->clazz; jmethodID get_intent_method = env.GetMethodID(env.GetObjectClass(activity), "getIntent", "()Landroid/content/Intent;"); jobject intent = env.CallObjectMethod(activity, get_intent_method); jmethodID get_string_extra_method = env.GetMethodID(env.GetObjectClass(intent), "getStringExtra", "(Ljava/lang/String;)Ljava/lang/String;"); jvalue get_string_extra_args; get_string_extra_args.l = env.NewStringUTF(intent_extra_data_key); jstring extra_str = static_cast<jstring>(env.CallObjectMethodA(intent, get_string_extra_method, &get_string_extra_args)); std::string args_str; if (extra_str) { const char *extra_utf = env.GetStringUTFChars(extra_str, nullptr); args_str = extra_utf; env.ReleaseStringUTFChars(extra_str, extra_utf); env.DeleteLocalRef(extra_str); } env.DeleteLocalRef(get_string_extra_args.l); env.DeleteLocalRef(intent); vm.DetachCurrentThread(); // split args_str std::stringstream ss(args_str); std::string arg; while (std::getline(ss, arg, ' ')) { if (!arg.empty()) args.push_back(arg); } return args; } void addFullTestCommentIfPresent(const ::testing::TestInfo &test_info, std::string &error_message) { const char *const type_param = test_info.type_param(); const char *const value_param = test_info.value_param(); if (type_param != NULL || value_param != NULL) { error_message.append(", where "); if (type_param != NULL) { error_message.append("TypeParam = ").append(type_param); if (value_param != NULL) error_message.append(" and "); } if (value_param != NULL) { error_message.append("GetParam() = ").append(value_param); } } } // Inspired by https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md class LogcatPrinter : public ::testing::EmptyTestEventListener { // Called before a test starts. virtual void OnTestStart(const ::testing::TestInfo &test_info) { __android_log_print(ANDROID_LOG_INFO, appTag, "[ RUN ] %s.%s", test_info.test_case_name(), test_info.name()); } // Called after a failed assertion or a SUCCEED() invocation. virtual void OnTestPartResult(const ::testing::TestPartResult &result) { // If the test part succeeded, we don't need to do anything. if (result.type() == ::testing::TestPartResult::kSuccess) return; __android_log_print(ANDROID_LOG_INFO, appTag, "%s in %s:%d %s", result.failed() ? "*** Failure" : "Success", result.file_name(), result.line_number(), result.summary()); } // Called after a test ends. 
virtual void OnTestEnd(const ::testing::TestInfo &info) { std::string result; if (info.result()->Passed()) { result.append("[ OK ]"); } else { result.append("[ FAILED ]"); } result.append(info.test_case_name()).append(".").append(info.name()); if (info.result()->Failed()) addFullTestCommentIfPresent(info, result); if (::testing::GTEST_FLAG(print_time)) { std::ostringstream os; os << info.result()->elapsed_time(); result.append(" (").append(os.str()).append(" ms)"); } __android_log_print(ANDROID_LOG_INFO, appTag, "%s", result.c_str()); }; }; static int32_t processInput(struct android_app *app, AInputEvent *event) { return 0; } static void processCommand(struct android_app *app, int32_t cmd) { switch (cmd) { case APP_CMD_INIT_WINDOW: { if (app->window) { initialized = true; } break; } case APP_CMD_GAINED_FOCUS: { active = true; break; } case APP_CMD_LOST_FOCUS: { active = false; break; } } } void android_main(struct android_app *app) { int vulkanSupport = InitVulkan(); if (vulkanSupport == 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "==== FAILED ==== No Vulkan support found"); return; } app->onAppCmd = processCommand; app->onInputEvent = processInput; while (1) { int events; struct android_poll_source *source; while (ALooper_pollAll(active ? 0 : -1, NULL, &events, (void **)&source) >= 0) { if (source) { source->process(app, source); } if (app->destroyRequested != 0) { VkTestFramework::Finish(); return; } } if (initialized && active) { // Use the following key to send arguments to gtest, i.e. // --es args "--gtest_filter=-VkLayerTest.foo" const char key[] = "args"; std::vector<std::string> args = get_args(*app, key); std::string filter = ""; if (args.size() > 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "Intent args = %s", args[0].c_str()); filter += args[0]; } else { __android_log_print(ANDROID_LOG_INFO, appTag, "No Intent args detected"); } int argc = 2; char *argv[] = {(char *)"foo", (char *)filter.c_str()}; __android_log_print(ANDROID_LOG_DEBUG, appTag, "filter = %s", argv[1]); // Route output to files until we can override the gtest output freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/out.txt", "w", stdout); freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/err.txt", "w", stderr); ::testing::InitGoogleTest(&argc, argv); ::testing::TestEventListeners &listeners = ::testing::UnitTest::GetInstance()->listeners(); listeners.Append(new LogcatPrinter); VkTestFramework::InitArgs(&argc, argv); ::testing::AddGlobalTestEnvironment(new TestEnvironment); int result = RUN_ALL_TESTS(); if (result != 0) { __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests FAILED ===="); } else { __android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests PASSED ===="); } VkTestFramework::Finish(); fclose(stdout); fclose(stderr); ANativeActivity_finish(app->activity); return; } } } #endif #if defined(_WIN32) && !defined(NDEBUG) #include <crtdbg.h> #endif int main(int argc, char **argv) { int result; #ifdef ANDROID int vulkanSupport = InitVulkan(); if (vulkanSupport == 0) return 1; #endif #if defined(_WIN32) && !defined(NDEBUG) _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); #endif ::testing::InitGoogleTest(&argc, argv); VkTestFramework::InitArgs(&argc, argv); ::testing::AddGlobalTestEnvironment(new TestEnvironment); result = RUN_ALL_TESTS(); VkTestFramework::Finish(); return result; }
1
8,301
Given that we are (conceptually) searching across multiple self-dependencies, all we can say is that we didn't find a self-dependency in which *both* the source and dest masks were correct. Since the spec doesn't imagine this case, the valid usage statements assume we can differentiate only-wrong-source from only-wrong-dest within a *single* self-dependency -- which we can't do across multiple (at least not meaningfully)... so we always return both the source/dest VUIDs if we can't find a self-dependency in which *both* are correct. (The reporting shape is sketched after this record.)
KhronosGroup-Vulkan-ValidationLayers
cpp
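A short sketch of the reporting shape described in the comment above. This is not the validation layer's actual code: the struct, the function name, and the placeholder VUID strings are all hypothetical, invented only to illustrate "return both VUIDs unless some self-dependency has both masks correct".

#include <vector>

// Hypothetical per-self-dependency summary of the mask checks.
struct SelfDepCheck {
    bool src_mask_ok;
    bool dst_mask_ok;
};

// Report nothing if any self-dependency has *both* masks correct; otherwise
// report both VUIDs, since across several self-dependencies "only the source
// was wrong" cannot be stated meaningfully.
std::vector<const char *> VuidsToReport(const std::vector<SelfDepCheck> &checks) {
    for (const auto &c : checks) {
        if (c.src_mask_ok && c.dst_mask_ok) return {};
    }
    return {"<src-mask VUID>", "<dst-mask VUID>"};  // placeholders, not real VUID names
}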
@@ -52,11 +52,11 @@ public class EthMiningTest { public void shouldReturnTrueWhenMiningCoordinatorExistsAndRunning() { final JsonRpcRequest request = requestWithParams(); final JsonRpcResponse expectedResponse = new JsonRpcSuccessResponse(request.getId(), true); - when(miningCoordinator.isRunning()).thenReturn(true); + when(miningCoordinator.isMining()).thenReturn(true); final JsonRpcResponse actualResponse = method.response(request); assertThat(actualResponse).isEqualToComparingFieldByField(expectedResponse); - verify(miningCoordinator).isRunning(); + verify(miningCoordinator).isMining(); verifyNoMoreInteractions(miningCoordinator); }
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.jsonrpc.internal.methods; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.JsonRpcRequest; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcResponse; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcSuccessResponse; import org.hyperledger.besu.ethereum.blockcreation.EthHashMiningCoordinator; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class EthMiningTest { @Mock private EthHashMiningCoordinator miningCoordinator; private EthMining method; private final String JSON_RPC_VERSION = "2.0"; private final String ETH_METHOD = "eth_mining"; @Before public void setUp() { method = new EthMining(miningCoordinator); } @Test public void returnsCorrectMethodName() { assertThat(method.getName()).isEqualTo(ETH_METHOD); } @Test public void shouldReturnTrueWhenMiningCoordinatorExistsAndRunning() { final JsonRpcRequest request = requestWithParams(); final JsonRpcResponse expectedResponse = new JsonRpcSuccessResponse(request.getId(), true); when(miningCoordinator.isRunning()).thenReturn(true); final JsonRpcResponse actualResponse = method.response(request); assertThat(actualResponse).isEqualToComparingFieldByField(expectedResponse); verify(miningCoordinator).isRunning(); verifyNoMoreInteractions(miningCoordinator); } @Test public void shouldReturnFalseWhenMiningCoordinatorExistsAndDisabled() { final JsonRpcRequest request = requestWithParams(); final JsonRpcResponse expectedResponse = new JsonRpcSuccessResponse(request.getId(), false); when(miningCoordinator.isRunning()).thenReturn(false); final JsonRpcResponse actualResponse = method.response(request); assertThat(actualResponse).isEqualToComparingFieldByField(expectedResponse); verify(miningCoordinator).isRunning(); verifyNoMoreInteractions(miningCoordinator); } private JsonRpcRequest requestWithParams(final Object... params) { return new JsonRpcRequest(JSON_RPC_VERSION, ETH_METHOD, params); } }
1
20,034
Rename? MiningCoordinator always exists, so 'Exists' in the test name is misleading. (A sketch of the rename follows this record.)
hyperledger-besu
java
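Since the comment is purely about naming, here is a sketch of the renamed test, reusing the body already shown above with the patched `isMining()` calls; the name `shouldReturnTrueWhenMiningEnabled` is illustrative, not necessarily what the PR chose.

@Test
public void shouldReturnTrueWhenMiningEnabled() {
    final JsonRpcRequest request = requestWithParams();
    final JsonRpcResponse expectedResponse = new JsonRpcSuccessResponse(request.getId(), true);
    when(miningCoordinator.isMining()).thenReturn(true);

    final JsonRpcResponse actualResponse = method.response(request);
    assertThat(actualResponse).isEqualToComparingFieldByField(expectedResponse);
    verify(miningCoordinator).isMining();
    verifyNoMoreInteractions(miningCoordinator);
}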
@@ -8,7 +8,8 @@ var ip = {}, net = require('net'), extIP = require('external-ip'), - plugins = require('../../../plugins/pluginManager.js'); + plugins = require('../../../plugins/pluginManager.js'), + offlineMode = plugins.getConfig("api").offline_mode; /** * Function to get the hostname/ip address/url to access dashboard
1
/** * Module returning hostname value * @module api/parts/mgmt/ip */ /** @lends module:api/parts/mgmt/ip */ var ip = {}, net = require('net'), extIP = require('external-ip'), plugins = require('../../../plugins/pluginManager.js'); /** * Function to get the hostname/ip address/url to access dashboard * @param {function} callback - callback function that returns the hostname */ ip.getHost = function(callback) { // If host is set in config.js use that, otherwise get the external IP from ifconfig.me var domain = plugins.getConfig("api").domain; if (typeof domain !== "undefined" && domain !== "") { if (domain.indexOf("://") === -1) { domain = "http://" + domain; } callback(false, stripTrailingSlash(domain)); } else { getIP(function(err, ipres) { if (err) { console.log(err); getNetworkIP(function(err2, ipaddress) { callback(err2, "http://" + ipaddress); }); } else { callback(err, "http://" + ipres); } }); } }; /** * Strip trailing slash * @param {string} str - string from which to remove trailing slash * @returns {string} modified string */ function stripTrailingSlash(str) { if (str.substr(str.length - 1) === '/') { return str.substr(0, str.length - 1); } return str; } var getIP = extIP({ timeout: 600, getIP: 'parallel' }); /** * Try to get ip address through network, by connecting to external resource * @param {function} callback - callback function that returns the ip address */ function getNetworkIP(callback) { var socket = net.createConnection(80, 'www.google.com'); socket.setTimeout(1000); socket.on('connect', function() { callback(undefined, socket.address().address); socket.end(); }); socket.on('error', function(e) { callback(e, 'localhost'); }); } module.exports = ip;
1
13,365
Here it would be the same case: you don't need to call `loadConfigs`, but you would need to re-read the config using `getConfig` on each `getHost` call, not once per file. (See the sketch after this record.)
Countly-countly-server
js
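A minimal sketch of what the comment asks for, using the `plugins.getConfig` call already present in this file. The `offline_mode` flag comes from the patch above; what offline mode should return is not shown there, so the localhost fallback here is an assumption.

ip.getHost = function(callback) {
    // Re-read the config on every call so runtime changes are picked up,
    // instead of caching offline_mode once at require time.
    var apiConfig = plugins.getConfig("api");
    var domain = apiConfig.domain;
    var offlineMode = apiConfig.offline_mode;

    if (offlineMode) {
        // Hypothetical behavior for offline mode.
        return callback(false, "http://localhost");
    }
    if (typeof domain !== "undefined" && domain !== "") {
        if (domain.indexOf("://") === -1) {
            domain = "http://" + domain;
        }
        return callback(false, stripTrailingSlash(domain));
    }
    // ... otherwise fall through to the external-IP lookup shown in the file above ...
};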
@@ -486,6 +486,8 @@ func (d *Dir) Cleanup(ctx context.Context, fi *dokan.FileInfo) { defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }() if fi != nil && fi.IsDeleteOnClose() && d.parent != nil { + d.folder.fs.renameAndDeletionLock.Lock() + defer d.folder.fs.renameAndDeletionLock.Unlock() d.folder.fs.log.CDebugf(ctx, "Removing (Delete) dir in cleanup %s", d.name) err = d.folder.fs.config.KBFSOps().RemoveDir(ctx, d.parent, d.name)
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libdokan import ( "fmt" "strings" "sync" "time" "github.com/keybase/kbfs/dokan" "github.com/keybase/kbfs/libfs" "github.com/keybase/kbfs/libkbfs" "golang.org/x/net/context" ) // Folder represents KBFS top-level folders type Folder struct { fs *FS list *FolderList handleMu sync.RWMutex h *libkbfs.TlfHandle folderBranchMu sync.Mutex folderBranch libkbfs.FolderBranch // Protects the nodes map. mu sync.Mutex // Map KBFS nodes to FUSE nodes, to be able to handle multiple // lookups and incoming change notifications. A node is present // here if the kernel holds a reference to it. // // If we ever support hardlinks, this would need refcounts. // // Children must call folder.forgetChildLocked on receiving the // FUSE Forget request. nodes map[libkbfs.NodeID]dokan.File // Protects the updateChan. updateMu sync.Mutex // updateChan is non-nil when the user disables updates via the // file system. Sending a struct{}{} on this channel will unpause // the updates. updateChan chan<- struct{} // noForget is turned on when the folder may not be forgotten // because it has attached special file state with it. noForget bool } func newFolder(fl *FolderList, h *libkbfs.TlfHandle) *Folder { f := &Folder{ fs: fl.fs, list: fl, h: h, nodes: map[libkbfs.NodeID]dokan.File{}, } return f } func (f *Folder) name() libkbfs.CanonicalTlfName { f.handleMu.RLock() defer f.handleMu.RUnlock() return f.h.GetCanonicalName() } func (f *Folder) setFolderBranch(folderBranch libkbfs.FolderBranch) error { f.folderBranchMu.Lock() defer f.folderBranchMu.Unlock() // TODO unregister all at unmount err := f.list.fs.config.Notifier().RegisterForChanges( []libkbfs.FolderBranch{folderBranch}, f) if err != nil { return err } f.folderBranch = folderBranch return nil } func (f *Folder) unsetFolderBranch(ctx context.Context) { f.folderBranchMu.Lock() defer f.folderBranchMu.Unlock() if f.folderBranch == (libkbfs.FolderBranch{}) { // Wasn't set. return } err := f.list.fs.config.Notifier().UnregisterFromChanges([]libkbfs.FolderBranch{f.folderBranch}, f) if err != nil { f.fs.log.Info("cannot unregister change notifier for folder %q: %v", f.name(), err) } f.folderBranch = libkbfs.FolderBranch{} } func (f *Folder) getFolderBranch() libkbfs.FolderBranch { f.folderBranchMu.Lock() defer f.folderBranchMu.Unlock() return f.folderBranch } // forgetNode forgets a formerly active child with basename name. func (f *Folder) forgetNode(node libkbfs.Node) { f.mu.Lock() defer f.mu.Unlock() delete(f.nodes, node.GetID()) if len(f.nodes) == 0 && !f.noForget { ctx := context.Background() f.unsetFolderBranch(ctx) f.list.forgetFolder(string(f.name())) } } func (f *Folder) reportErr(ctx context.Context, mode libkbfs.ErrorModeType, err error) { if err == nil { f.fs.log.CDebugf(ctx, "Request complete") return } f.fs.config.Reporter().ReportErr(ctx, f.name(), f.list.public, mode, err) // We just log the error as debug, rather than error, because it // might just indicate an expected error such as an ENOENT. // // TODO: Classify errors and escalate the logging level of the // important ones. f.fs.log.CDebugf(ctx, err.Error()) } func (f *Folder) lockedAddNode(node libkbfs.Node, val dokan.File) { f.mu.Lock() f.nodes[node.GetID()] = val f.mu.Unlock() } // LocalChange is called for changes originating within in this process. 
func (f *Folder) LocalChange(ctx context.Context, node libkbfs.Node, write libkbfs.WriteRange) { f.fs.queueNotification(func() {}) } // BatchChanges is called for changes originating anywhere, including // other hosts. func (f *Folder) BatchChanges(ctx context.Context, changes []libkbfs.NodeChange) { f.fs.queueNotification(func() {}) } // TlfHandleChange is called when the name of a folder changes. func (f *Folder) TlfHandleChange(ctx context.Context, newHandle *libkbfs.TlfHandle) { // Handle in the background because we shouldn't lock during // the notification f.fs.queueNotification(func() { oldName := func() libkbfs.CanonicalTlfName { f.handleMu.Lock() defer f.handleMu.Unlock() oldName := f.h.GetCanonicalName() f.h = newHandle return oldName }() f.list.updateTlfName(ctx, string(oldName), string(newHandle.GetCanonicalName())) }) } func (f *Folder) resolve(ctx context.Context) (*libkbfs.TlfHandle, error) { // In case there were any unresolved assertions, try them again on // the first load. Otherwise, since we haven't subscribed to // updates yet for this folder, we might have missed a name // change. handle, err := f.h.ResolveAgain(ctx, f.fs.config.KBPKI()) if err != nil { return nil, err } eq, err := f.h.Equals(f.fs.config.Codec(), *handle) if err != nil { return nil, err } if !eq { // Make sure the name changes in the folder and the folder list f.TlfHandleChange(ctx, handle) } return handle, nil } // Dir represents KBFS subdirectories. type Dir struct { FSO } func newDir(folder *Folder, node libkbfs.Node, name string, parent libkbfs.Node) *Dir { d := &Dir{FSO{ name: name, parent: parent, folder: folder, node: node, }} d.refcount.Increase() return d } // GetFileInformation for dokan. func (d *Dir) GetFileInformation(ctx context.Context, fi *dokan.FileInfo) (st *dokan.Stat, err error) { d.folder.fs.logEnter(ctx, "Dir GetFileInformation") defer func() { d.folder.reportErr(ctx, libkbfs.ReadMode, err) }() return eiToStat(d.folder.fs.config.KBFSOps().Stat(ctx, d.node)) } // SetFileAttributes for Dokan. func (d *Dir) SetFileAttributes(ctx context.Context, fi *dokan.FileInfo, fileAttributes dokan.FileAttribute) error { d.folder.fs.logEnter(ctx, "Dir SetFileAttributes") // TODO handle attributes for real. return nil } // isNoSuchNameError checks for libkbfs.NoSuchNameError. func isNoSuchNameError(err error) bool { _, ok := err.(libkbfs.NoSuchNameError) return ok } // lastStr returns last string in a string slice or "" if the slice is empty. func lastStr(strs []string) string { if len(strs) == 0 { return "" } return strs[len(strs)-1] } // open tries to open a file. func (d *Dir) open(ctx context.Context, oc *openContext, path []string) (dokan.File, bool, error) { d.folder.fs.log.CDebugf(ctx, "Dir openDir %v", path) specialNode := handleTLFSpecialFile(lastStr(path), d.folder) if specialNode != nil { return specialNode, false, nil } origPath := path rootDir := d for len(path) > 0 { // Handle upper case filenames from junctions etc if c := lowerTranslateCandidate(oc, path[0]); c != "" { var hit string var nhits int d.FindFiles(ctx, nil, c, func(ns *dokan.NamedStat) error { if strings.ToLower(ns.Name) == c { hit = ns.Name nhits++ } return nil }) if nhits != 1 { return nil, false, dokan.ErrObjectNameNotFound } path[0] = hit } leaf := len(path) == 1 // Check if this is a per-file metainformation file, if so // return the corresponding SpecialReadFile. 
if leaf && strings.HasPrefix(path[0], libfs.FileInfoPrefix) { node, _, err := d.folder.fs.config.KBFSOps().Lookup(ctx, d.node, path[0][len(libfs.FileInfoPrefix):]) if err != nil { return nil, false, err } nmd, err := d.folder.fs.config.KBFSOps().GetNodeMetadata(ctx, node) if err != nil { return nil, false, err } return &SpecialReadFile{read: fileInfo(nmd).read, fs: d.folder.fs}, false, nil } newNode, de, err := d.folder.fs.config.KBFSOps().Lookup(ctx, d.node, path[0]) // If we are in the final component, check if it is a creation. if leaf { notFound := isNoSuchNameError(err) switch { case notFound && oc.isCreateDirectory(): return d.mkdir(ctx, oc, path[0]) case notFound && oc.isCreation(): return d.create(ctx, oc, path[0]) case !notFound && oc.isExistingError(): return nil, false, dokan.ErrFileAlreadyExists } } // Return errors from Lookup if err != nil { return nil, false, err } if newNode != nil { d.folder.mu.Lock() f, _ := d.folder.nodes[newNode.GetID()] d.folder.mu.Unlock() // Symlinks don't have stored nodes, so they are impossible here. switch x := f.(type) { default: return nil, false, fmt.Errorf("unhandled node type: %T", f) case nil: case *File: x.refcount.Increase() return openFile(ctx, oc, path, x) case *Dir: d = x path = path[1:] continue } } switch de.Type { default: return nil, false, fmt.Errorf("unhandled entry type: %v", de.Type) case libkbfs.File, libkbfs.Exec: child := newFile(d.folder, newNode, path[0], d.node) f, _, err := openFile(ctx, oc, path, child) if err == nil { d.folder.lockedAddNode(newNode, child) } return f, false, err case libkbfs.Dir: child := newDir(d.folder, newNode, path[0], d.node) d.folder.lockedAddNode(newNode, child) d = child path = path[1:] case libkbfs.Sym: return openSymlink(ctx, oc, d, rootDir, origPath, path, de.SymPath) } } if oc.mayNotBeDirectory() { return nil, true, dokan.ErrFileIsADirectory } d.refcount.Increase() return d, true, nil } type fileInfo libkbfs.NodeMetadata func (fi fileInfo) read(ctx context.Context) ([]byte, time.Time, error) { bs, err := libfs.PrettyJSON(fi) return bs, time.Time{}, err } func openFile(ctx context.Context, oc *openContext, path []string, f *File) (dokan.File, bool, error) { var err error // Files only allowed as leafs... if len(path) > 1 { return nil, false, dokan.ErrObjectNameNotFound } if oc.isTruncate() { err = f.folder.fs.config.KBFSOps().Truncate(ctx, f.node, 0) } if err != nil { return nil, false, err } return f, false, nil } func openSymlink(ctx context.Context, oc *openContext, parent *Dir, rootDir *Dir, origPath, path []string, target string) (dokan.File, bool, error) { if !oc.reduceRedirectionsLeft() { return nil, false, dokan.ErrObjectNameNotFound } // Take relevant prefix of original path. origPath = origPath[:len(origPath)-len(path)] if len(path) == 1 && oc.isOpenReparsePoint() { // a Symlink is never included in Folder.nodes, as it doesn't // have a libkbfs.Node to keep track of renames. // Here we may get an error if the symlink destination does not exist. // which is fine, treat such non-existing targets as symlinks to a file. isDir, err := resolveSymlinkIsDir(ctx, oc, rootDir, origPath, target) parent.folder.fs.log.CDebugf(ctx, "openSymlink leaf returned %v,%v => %v,%v", origPath, target, isDir, err) return &Symlink{parent: parent, name: path[0], isTargetADirectory: isDir}, isDir, nil } // reference symlink, symbolic links always use '/' instead of '\'. 
if target == "" || target[0] == '/' { return nil, false, dokan.ErrNotSupported } dst, err := resolveSymlinkPath(ctx, origPath, target) parent.folder.fs.log.CDebugf(ctx, "openSymlink resolve returned %v,%v => %v,%v", origPath, target, dst, err) if err != nil { return nil, false, err } dst = append(dst, path[1:]...) return rootDir.open(ctx, oc, dst) } func getExclFromOpenContext(oc *openContext) libkbfs.Excl { return libkbfs.Excl(oc.CreateDisposition == dokan.FileCreate) } func (d *Dir) create(ctx context.Context, oc *openContext, name string) (f dokan.File, isDir bool, err error) { d.folder.fs.log.CDebugf(ctx, "Dir Create %s", name) defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }() isExec := false // Windows lacks executable modes. excl := getExclFromOpenContext(oc) newNode, _, err := d.folder.fs.config.KBFSOps().CreateFile( ctx, d.node, name, isExec, excl) if err != nil { return nil, false, err } child := newFile(d.folder, newNode, name, d.node) d.folder.lockedAddNode(newNode, child) return child, false, nil } func (d *Dir) mkdir(ctx context.Context, oc *openContext, name string) (f *Dir, isDir bool, err error) { d.folder.fs.log.CDebugf(ctx, "Dir Mkdir %s", name) defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }() newNode, _, err := d.folder.fs.config.KBFSOps().CreateDir( ctx, d.node, name) if err != nil { return nil, false, err } child := newDir(d.folder, newNode, name, d.node) d.folder.lockedAddNode(newNode, child) return child, true, nil } // FindFiles does readdir for dokan. func (d *Dir) FindFiles(ctx context.Context, fi *dokan.FileInfo, ignored string, callback func(*dokan.NamedStat) error) (err error) { d.folder.fs.logEnter(ctx, "Dir FindFiles") defer func() { d.folder.reportErr(ctx, libkbfs.ReadMode, err) }() children, err := d.folder.fs.config.KBFSOps().GetDirChildren(ctx, d.node) if err != nil { return err } empty := true var ns dokan.NamedStat for name, de := range children { empty = false ns.Name = name // TODO perhaps resolve symlinks here? fillStat(&ns.Stat, &de) err = callback(&ns) if err != nil { return err } } if empty { return dokan.ErrObjectNameNotFound } return nil } // CanDeleteDirectory - return just nil // TODO check for permissions here. func (d *Dir) CanDeleteDirectory(ctx context.Context, fi *dokan.FileInfo) (err error) { d.folder.fs.logEnterf(ctx, "Dir CanDeleteDirectory %q", d.name) defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }() children, err := d.folder.fs.config.KBFSOps().GetDirChildren(ctx, d.node) if err != nil { return errToDokan(err) } if len(children) > 0 { return dokan.ErrDirectoryNotEmpty } return nil } // Cleanup - forget references, perform deletions etc. 
func (d *Dir) Cleanup(ctx context.Context, fi *dokan.FileInfo) { var err error if fi != nil { d.folder.fs.logEnterf(ctx, "Dir Cleanup %q delete=%v", d.name, fi.IsDeleteOnClose()) } else { d.folder.fs.logEnterf(ctx, "Dir Cleanup %q", d.name) } defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }() if fi != nil && fi.IsDeleteOnClose() && d.parent != nil { d.folder.fs.log.CDebugf(ctx, "Removing (Delete) dir in cleanup %s", d.name) err = d.folder.fs.config.KBFSOps().RemoveDir(ctx, d.parent, d.name) } if d.refcount.Decrease() { d.folder.forgetNode(d.node) } } func resolveSymlinkPath(ctx context.Context, origPath []string, targetPath string) ([]string, error) { pathComponents := make([]string, len(origPath), len(origPath)+1) copy(pathComponents, origPath) for _, p := range strings.FieldsFunc(targetPath, isPathSeparator) { switch p { case ".": case "..": if len(pathComponents) == 0 { return nil, dokan.ErrNotSupported } pathComponents = pathComponents[:len(pathComponents)-1] default: pathComponents = append(pathComponents, p) } } return pathComponents, nil } func resolveSymlinkIsDir(ctx context.Context, oc *openContext, rootDir *Dir, origPath []string, targetPath string) (bool, error) { dst, err := resolveSymlinkPath(ctx, origPath, targetPath) if err != nil { return false, err } obj, isDir, err := rootDir.open(ctx, oc, dst) if err == nil { obj.Cleanup(ctx, nil) } return isDir, err } func isPathSeparator(r rune) bool { return r == '/' || r == '\\' } func asDir(ctx context.Context, f dokan.File) *Dir { switch x := f.(type) { case *Dir: return x case *TLF: d, _, _ := x.loadDirHelper(ctx, "asDir", libkbfs.WriteMode, false) return d } return nil }
1
13,598
Unlocking with defer means that this lock is still held during the call to forgetNode(), below, which I see attempts to acquire `f.mu` -- that looks dangerous to me. (One possible restructuring is sketched after this record.)
keybase-kbfs
go
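One way to address the hazard the comment raises, sketched under the assumption that renameAndDeletionLock only needs to cover the RemoveDir call: unlock explicitly before the refcount/forgetNode path instead of holding the lock with defer for all of Cleanup. This is an illustration, not necessarily the fix the PR adopted.

func (d *Dir) Cleanup(ctx context.Context, fi *dokan.FileInfo) {
	var err error
	defer func() { d.folder.reportErr(ctx, libkbfs.WriteMode, err) }()
	if fi != nil && fi.IsDeleteOnClose() && d.parent != nil {
		d.folder.fs.renameAndDeletionLock.Lock()
		d.folder.fs.log.CDebugf(ctx, "Removing (Delete) dir in cleanup %s", d.name)
		err = d.folder.fs.config.KBFSOps().RemoveDir(ctx, d.parent, d.name)
		// Release before forgetNode below, which acquires f.mu.
		d.folder.fs.renameAndDeletionLock.Unlock()
	}
	if d.refcount.Decrease() {
		d.folder.forgetNode(d.node)
	}
}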
@@ -56,7 +56,7 @@ func Test_Mine(t *testing.T) { baseBlock := &block.Block{Height: 2, StateRoot: stateRoot, Tickets: []block.Ticket{{VRFProof: []byte{0}}}} tipSet := th.RequireNewTipSet(t, baseBlock) - st, pool, addrs, cst, bs := sharedSetup(t, mockSignerVal) + st, pool, addrs, _, bs := sharedSetup(t, mockSignerVal) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil }
1
package mining_test import ( "context" "errors" "testing" "time" "github.com/filecoin-project/go-bls-sigs" "github.com/filecoin-project/go-filecoin/block" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-hamt-ipld" "github.com/ipfs/go-ipfs-blockstore" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/actor" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/chain" "github.com/filecoin-project/go-filecoin/clock" "github.com/filecoin-project/go-filecoin/config" "github.com/filecoin-project/go-filecoin/consensus" "github.com/filecoin-project/go-filecoin/message" "github.com/filecoin-project/go-filecoin/mining" "github.com/filecoin-project/go-filecoin/state" th "github.com/filecoin-project/go-filecoin/testhelpers" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" "github.com/filecoin-project/go-filecoin/types" ) type mockTicketGen struct { ticketGen bool timeNotarized bool } func (mtg *mockTicketGen) NextTicket(ticket block.Ticket, genAddr address.Address, signer types.Signer) (block.Ticket, error) { mtg.ticketGen = true return consensus.MakeFakeTicketForTest(), nil } func (mtg *mockTicketGen) NotarizeTime(ticket *block.Ticket) error { mtg.timeNotarized = true return nil } func Test_Mine(t *testing.T) { tf.UnitTest(t) mockSignerVal, blockSignerAddr := setupSigner() mockSigner := &mockSignerVal newCid := types.NewCidForTestGetter() stateRoot := newCid() baseBlock := &block.Block{Height: 2, StateRoot: stateRoot, Tickets: []block.Ticket{{VRFProof: []byte{0}}}} tipSet := th.RequireNewTipSet(t, baseBlock) st, pool, addrs, cst, bs := sharedSetup(t, mockSignerVal) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } minerAddr := addrs[3] // addr4 in sharedSetup minerOwnerAddr := addrs[4] // addr5 in sharedSetup messages := chain.NewMessageStore(cst) // TODO #3311: this case isn't testing much. Testing w.Mine further needs a lot more attention. 
t.Run("Trivial success case", func(t *testing.T) { testTicketGen := &mockTicketGen{} ctx, cancel := context.WithCancel(context.Background()) outCh := make(chan mining.Output) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: minerAddr, MinerOwnerAddr: minerOwnerAddr, WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: testTicketGen, MessageSource: pool, Processor: th.NewFakeProcessor(), Blockstore: bs, MessageStore: messages, Clock: clock.NewSystemClock(), }) go worker.Mine(ctx, tipSet, []block.Ticket{}, outCh) r := <-outCh assert.NoError(t, r.Err) assert.True(t, testTicketGen.ticketGen) assert.True(t, testTicketGen.timeNotarized) cancel() }) t.Run("Block generation fails", func(t *testing.T) { testTicketGen := &mockTicketGen{} ctx, cancel := context.WithCancel(context.Background()) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: minerAddr, MinerOwnerAddr: minerOwnerAddr, WorkerSigner: mockSigner, GetStateTree: makeExplodingGetStateTree(st), GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: testTicketGen, MessageSource: pool, Processor: th.NewFakeProcessor(), Blockstore: bs, MessageStore: messages, Clock: clock.NewSystemClock(), }) outCh := make(chan mining.Output) go worker.Mine(ctx, tipSet, []block.Ticket{}, outCh) r := <-outCh assert.EqualError(t, r.Err, "generate flush state tree: boom no flush") assert.True(t, testTicketGen.ticketGen) assert.True(t, testTicketGen.timeNotarized) cancel() }) t.Run("Sent empty tipset", func(t *testing.T) { testTicketGen := &mockTicketGen{} ctx, cancel := context.WithCancel(context.Background()) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: minerAddr, MinerOwnerAddr: minerOwnerAddr, WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: testTicketGen, MessageSource: pool, Processor: th.NewFakeProcessor(), Blockstore: bs, MessageStore: messages, Clock: clock.NewSystemClock(), }) input := block.TipSet{} outCh := make(chan mining.Output) go worker.Mine(ctx, input, []block.Ticket{}, outCh) r := <-outCh assert.EqualError(t, r.Err, "bad input tipset with no blocks sent to Mine()") assert.False(t, testTicketGen.ticketGen) assert.False(t, testTicketGen.timeNotarized) cancel() }) } func sharedSetupInitial() (*hamt.CborIpldStore, *message.Pool, cid.Cid) { cst := hamt.NewCborStore() pool := message.NewPool(config.NewDefaultConfig().Mpool, th.NewMockMessagePoolValidator()) // Install the fake actor so we can execute it. fakeActorCodeCid := types.AccountActorCodeCid return cst, pool, fakeActorCodeCid } func sharedSetup(t *testing.T, mockSigner types.MockSigner) ( state.Tree, *message.Pool, []address.Address, *hamt.CborIpldStore, blockstore.Blockstore) { cst, pool, fakeActorCodeCid := sharedSetupInitial() vms := th.VMStorage() d := datastore.NewMapDatastore() bs := blockstore.NewBlockstore(d) // TODO: We don't need fake actors here, so these could be made real. // And the NetworkAddress actor can/should be the real one. // Stick two fake actors in the state tree so they can talk. 
// Now tracking in #3311 addr1, addr2, addr3, addr4, addr5 := mockSigner.Addresses[0], mockSigner.Addresses[1], mockSigner.Addresses[2], mockSigner.Addresses[3], mockSigner.Addresses[4] act1 := th.RequireNewFakeActor(t, vms, addr1, fakeActorCodeCid) act2 := th.RequireNewFakeActor(t, vms, addr2, fakeActorCodeCid) fakeNetAct := th.RequireNewFakeActorWithTokens(t, vms, addr3, fakeActorCodeCid, types.NewAttoFILFromFIL(1000000)) minerAct := th.RequireNewMinerActor(t, vms, addr4, addr5, 10, th.RequireRandomPeerID(t), types.NewAttoFILFromFIL(10000)) minerOwner := th.RequireNewFakeActor(t, vms, addr5, fakeActorCodeCid) _, st := th.RequireMakeStateTree(t, cst, map[address.Address]*actor.Actor{ // Ensure core.NetworkAddress exists to prevent mining reward failures. address.NetworkAddress: fakeNetAct, addr1: act1, addr2: act2, addr4: minerAct, addr5: minerOwner, }) return st, pool, []address.Address{addr1, addr2, addr3, addr4, addr5}, cst, bs } // TODO this test belongs in core, it calls ApplyMessages #3311 func TestApplyMessagesForSuccessTempAndPermFailures(t *testing.T) { tf.UnitTest(t) vms := th.VMStorage() mockSigner, _ := setupSigner() cst, _, fakeActorCodeCid := sharedSetupInitial() // Stick two fake actors in the state tree so they can talk. addr1, addr2 := mockSigner.Addresses[0], mockSigner.Addresses[1] act1 := th.RequireNewFakeActor(t, vms, addr1, fakeActorCodeCid) _, st := th.RequireMakeStateTree(t, cst, map[address.Address]*actor.Actor{ address.NetworkAddress: th.RequireNewAccountActor(t, types.NewAttoFILFromFIL(1000000)), addr1: act1, }) ctx := context.Background() // NOTE: it is important that each category (success, temporary failure, permanent failure) is represented below. // If a given message's category changes in the future, it needs to be replaced here in tests by another so we fully // exercise the categorization. // addr2 doesn't correspond to an extant account, so this will trigger errAccountNotFound -- a temporary failure. msg1 := types.NewMeteredMessage(addr2, addr1, 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg1, err := types.NewSignedMessage(*msg1, &mockSigner) require.NoError(t, err) // This is actually okay and should result in a receipt msg2 := types.NewMeteredMessage(addr1, addr2, 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg2, err := types.NewSignedMessage(*msg2, &mockSigner) require.NoError(t, err) // The following two are sending to self -- errSelfSend, a permanent error. 
msg3 := types.NewMeteredMessage(addr1, addr1, 1, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg3, err := types.NewSignedMessage(*msg3, &mockSigner) require.NoError(t, err) msg4 := types.NewMeteredMessage(addr2, addr2, 1, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg4, err := types.NewSignedMessage(*msg4, &mockSigner) require.NoError(t, err) messages := []*types.SignedMessage{smsg1, smsg2, smsg3, smsg4} res, err := consensus.NewDefaultProcessor().ApplyMessagesAndPayRewards(ctx, st, vms, messages, addr1, types.NewBlockHeight(0), nil) require.NotNil(t, res) assert.Len(t, res.PermanentFailures, 2) assert.Contains(t, res.PermanentFailures, smsg3) assert.Contains(t, res.PermanentFailures, smsg4) assert.Len(t, res.TemporaryFailures, 1) assert.Contains(t, res.TemporaryFailures, smsg1) assert.Len(t, res.Results, 1) assert.Contains(t, res.SuccessfulMessages, smsg2) assert.NoError(t, err) } func TestApplyBLSMessages(t *testing.T) { tf.UnitTest(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() ki := types.MustGenerateMixedKeyInfo(5, 5) mockSignerVal := types.NewMockSigner(ki) mockSigner := &mockSignerVal newCid := types.NewCidForTestGetter() stateRoot := newCid() baseBlock := &block.Block{Height: 2, StateRoot: stateRoot, Tickets: []block.Ticket{{VRFProof: []byte{0}}}} tipSet := th.RequireNewTipSet(t, baseBlock) st, pool, addrs, cst, bs := sharedSetup(t, mockSignerVal) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } msgStore := chain.NewMessageStore(cst) // assert that first two addresses have different protocols blsAddress := addrs[0] assert.Equal(t, address.BLS, blsAddress.Protocol()) secpAddress := addrs[1] assert.Equal(t, address.SECP256K1, secpAddress.Protocol()) // create secp and bls signed messages interleaved for i := 0; i < 10; i++ { var addr address.Address if i%2 == 0 { addr = blsAddress } else { addr = secpAddress } smsg := requireSignedMessage(t, mockSigner, addr, addrs[3], uint64(i/2), types.NewAttoFILFromFIL(1)) _, err := pool.Add(ctx, smsg, uint64(0)) require.NoError(t, err) } testTicketGen := &mockTicketGen{} worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(mockSigner.Addresses[5]), MinerAddr: addrs[3], MinerOwnerAddr: addrs[4], WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: testTicketGen, MessageSource: pool, Processor: th.NewFakeProcessor(), Blockstore: bs, MessageStore: msgStore, Clock: clock.NewSystemClock(), }) outCh := make(chan mining.Output) go worker.Mine(ctx, tipSet, []block.Ticket{}, outCh) r := <-outCh require.NoError(t, r.Err) block := r.NewBlock t.Run("messages are divided into bls and secp messages", func(t *testing.T) { secpMessages, blsMessages, err := msgStore.LoadMessages(ctx, block.Messages) require.NoError(t, err) assert.Len(t, secpMessages, 5) assert.Len(t, blsMessages, 5) for _, msg := range secpMessages { assert.Equal(t, address.SECP256K1, msg.Message.From.Protocol()) } for _, msg := range blsMessages { assert.Equal(t, address.BLS, msg.From.Protocol()) } }) t.Run("all 10 messages are executed", func(t *testing.T) { receipts, err := msgStore.LoadReceipts(ctx, block.MessageReceipts) require.NoError(t, err) assert.Len(t, receipts, 10) 
}) t.Run("block bls signature can be used to validate messages", func(t *testing.T) { digests := []bls.Digest{} keys := []bls.PublicKey{} _, blsMessages, err := msgStore.LoadMessages(ctx, block.Messages) require.NoError(t, err) for _, msg := range blsMessages { msgBytes, err := msg.Marshal() require.NoError(t, err) digests = append(digests, bls.Hash(msgBytes)) pubKey := bls.PublicKey{} copy(pubKey[:], msg.From.Payload()) keys = append(keys, pubKey) } blsSig := bls.Signature{} copy(blsSig[:], block.BLSAggregateSig) valid := bls.Verify(&blsSig, digests, keys) assert.True(t, valid) }) } func requireSignedMessage(t *testing.T, signer types.Signer, from, to address.Address, nonce uint64, value types.AttoFIL) *types.SignedMessage { msg := types.NewMeteredMessage(from, to, nonce, value, "", []byte{}, types.NewAttoFILFromFIL(1), 300) smsg, err := types.NewSignedMessage(*msg, signer) require.NoError(t, err) return smsg } func TestGenerateMultiBlockTipSet(t *testing.T) { tf.UnitTest(t) ctx := context.Background() mockSigner, blockSignerAddr := setupSigner() st, pool, addrs, cst, bs := sharedSetup(t, mockSigner) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } minerAddr := addrs[4] minerOwnerAddr := addrs[3] messages := chain.NewMessageStore(cst) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: minerAddr, MinerOwnerAddr: minerOwnerAddr, WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: &consensus.FakeTicketMachine{}, MessageSource: pool, Processor: th.NewFakeProcessor(), Blockstore: bs, MessageStore: messages, Clock: th.NewFakeClock(time.Unix(1234567890, 0)), }) builder := chain.NewBuilder(t, address.Undef) genesis := builder.NewGenesis() parentTipset := builder.AppendManyOn(99, genesis) baseTipset := builder.AppendOn(parentTipset, 2) assert.Equal(t, 2, baseTipset.Len()) blk, err := worker.Generate(ctx, baseTipset, []block.Ticket{{VRFProof: []byte{2}}}, consensus.MakeFakeElectionProofForTest(), 0) assert.NoError(t, err) assert.Equal(t, types.EmptyMessagesCID, blk.Messages.SecpRoot) assert.Equal(t, types.EmptyReceiptsCID, blk.MessageReceipts) assert.Equal(t, types.Uint64(101), blk.Height) assert.Equal(t, types.Uint64(120), blk.ParentWeight) assert.Equal(t, block.Ticket{VRFProof: []byte{2}}, blk.Tickets[0]) } // After calling Generate, do the new block and new state of the message pool conform to our expectations? 
func TestGeneratePoolBlockResults(t *testing.T) { tf.UnitTest(t) ctx := context.Background() mockSigner, blockSignerAddr := setupSigner() newCid := types.NewCidForTestGetter() st, pool, addrs, cst, bs := sharedSetup(t, mockSigner) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } messages := chain.NewMessageStore(cst) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: addrs[4], MinerOwnerAddr: addrs[3], WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: &consensus.FakeTicketMachine{}, MessageSource: pool, Processor: consensus.NewDefaultProcessor(), Blockstore: bs, MessageStore: messages, Clock: th.NewFakeClock(time.Unix(1234567890, 0)), }) // addr3 doesn't correspond to an extant account, so this will trigger errAccountNotFound -- a temporary failure. msg1 := types.NewMeteredMessage(addrs[2], addrs[0], 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg1, err := types.NewSignedMessage(*msg1, &mockSigner) require.NoError(t, err) // This is actually okay and should result in a receipt msg2 := types.NewMeteredMessage(addrs[0], addrs[1], 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg2, err := types.NewSignedMessage(*msg2, &mockSigner) require.NoError(t, err) // add the following and then increment the actor nonce at addrs[1], nonceTooLow, a permanent error. msg3 := types.NewMeteredMessage(addrs[1], addrs[0], 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg3, err := types.NewSignedMessage(*msg3, &mockSigner) require.NoError(t, err) msg4 := types.NewMeteredMessage(addrs[1], addrs[2], 1, types.ZeroAttoFIL, "", nil, types.NewGasPrice(1), types.NewGasUnits(0)) smsg4, err := types.NewSignedMessage(*msg4, &mockSigner) require.NoError(t, err) _, err = pool.Add(ctx, smsg1, 0) assert.NoError(t, err) _, err = pool.Add(ctx, smsg2, 0) assert.NoError(t, err) _, err = pool.Add(ctx, smsg3, 0) assert.NoError(t, err) _, err = pool.Add(ctx, smsg4, 0) assert.NoError(t, err) assert.Len(t, pool.Pending(), 4) // Set actor nonce past nonce of message in pool. // Have to do this here to get a permanent error in the pool. act, err := st.GetActor(ctx, addrs[1]) require.NoError(t, err) act.Nonce = types.Uint64(2) err = st.SetActor(ctx, addrs[1], act) require.NoError(t, err) stateRoot, err := st.Flush(ctx) require.NoError(t, err) baseBlock := block.Block{ Parents: block.NewTipSetKey(newCid()), Height: types.Uint64(100), StateRoot: stateRoot, ElectionProof: consensus.MakeFakeElectionProofForTest(), } blk, err := worker.Generate(ctx, th.RequireNewTipSet(t, &baseBlock), []block.Ticket{{VRFProof: []byte{0}}}, consensus.MakeFakeElectionProofForTest(), 0) assert.NoError(t, err) // This is the temporary failure + the good message, // which will be removed by the node if this block is accepted. assert.Len(t, pool.Pending(), 2) assert.Contains(t, pool.Pending(), smsg1) assert.Contains(t, pool.Pending(), smsg2) // message and receipts can be loaded from message store and have // length 1. 
msgs, _, err := messages.LoadMessages(ctx, blk.Messages) require.NoError(t, err) assert.Len(t, msgs, 1) // This is the good message rcpts, err := messages.LoadReceipts(ctx, blk.MessageReceipts) require.NoError(t, err) assert.Len(t, rcpts, 1) } func TestGenerateSetsBasicFields(t *testing.T) { tf.UnitTest(t) ctx := context.Background() mockSigner, blockSignerAddr := setupSigner() newCid := types.NewCidForTestGetter() st, pool, addrs, cst, bs := sharedSetup(t, mockSigner) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } minerAddr := addrs[4] minerOwnerAddr := addrs[3] messages := chain.NewMessageStore(cst) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: minerAddr, MinerOwnerAddr: minerOwnerAddr, WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: &consensus.FakeTicketMachine{}, MessageSource: pool, Processor: consensus.NewDefaultProcessor(), Blockstore: bs, MessageStore: messages, Clock: th.NewFakeClock(time.Unix(1234567890, 0)), }) h := types.Uint64(100) w := types.Uint64(1000) baseBlock := block.Block{ Height: h, ParentWeight: w, StateRoot: newCid(), ElectionProof: consensus.MakeFakeElectionProofForTest(), } baseTipSet := th.RequireNewTipSet(t, &baseBlock) tArr := []block.Ticket{mining.NthTicket(1), mining.NthTicket(3), mining.NthTicket(3), mining.NthTicket(7)} blk, err := worker.Generate(ctx, baseTipSet, tArr, consensus.MakeFakeElectionProofForTest(), 0) assert.NoError(t, err) assert.Equal(t, h+1, blk.Height) assert.Equal(t, minerAddr, blk.Miner) assert.Equal(t, tArr, blk.Tickets) blk, err = worker.Generate(ctx, baseTipSet, []block.Ticket{{VRFProof: []byte{0}}}, consensus.MakeFakeElectionProofForTest(), 1) assert.NoError(t, err) assert.Equal(t, h+2, blk.Height) assert.Equal(t, w+10.0, blk.ParentWeight) assert.Equal(t, minerAddr, blk.Miner) } func TestGenerateWithoutMessages(t *testing.T) { tf.UnitTest(t) ctx := context.Background() mockSigner, blockSignerAddr := setupSigner() newCid := types.NewCidForTestGetter() st, pool, addrs, cst, bs := sharedSetup(t, mockSigner) getStateTree := func(c context.Context, ts block.TipSet) (state.Tree, error) { return st, nil } getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } messages := chain.NewMessageStore(cst) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: addrs[4], MinerOwnerAddr: addrs[3], WorkerSigner: mockSigner, GetStateTree: getStateTree, GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: &consensus.FakeTicketMachine{}, MessageSource: pool, Processor: consensus.NewDefaultProcessor(), Blockstore: bs, MessageStore: messages, Clock: th.NewFakeClock(time.Unix(1234567890, 0)), }) assert.Len(t, pool.Pending(), 0) baseBlock := block.Block{ Parents: block.NewTipSetKey(newCid()), Height: types.Uint64(100), StateRoot: newCid(), ElectionProof: consensus.MakeFakeElectionProofForTest(), } blk, err := worker.Generate(ctx, th.RequireNewTipSet(t, &baseBlock), []block.Ticket{{VRFProof: []byte{0}}}, consensus.MakeFakeElectionProofForTest(), 0) assert.NoError(t, err) 
assert.Len(t, pool.Pending(), 0) // This is the temporary failure. assert.Equal(t, types.SignedMessageCollection{}.Cid(), blk.Messages.SecpRoot) assert.Equal(t, types.ReceiptCollection{}.Cid(), blk.MessageReceipts) } // If something goes wrong while generating a new block, even as late as when flushing it, // no block should be returned, and the message pool should not be pruned. func TestGenerateError(t *testing.T) { tf.UnitTest(t) ctx := context.Background() mockSigner, blockSignerAddr := setupSigner() newCid := types.NewCidForTestGetter() st, pool, addrs, cst, bs := sharedSetup(t, mockSigner) getAncestors := func(ctx context.Context, ts block.TipSet, newBlockHeight *types.BlockHeight) ([]block.TipSet, error) { return nil, nil } messages := chain.NewMessageStore(cst) worker := mining.NewDefaultWorker(mining.WorkerParameters{ API: th.NewDefaultFakeWorkerPorcelainAPI(blockSignerAddr), MinerAddr: addrs[4], MinerOwnerAddr: addrs[3], WorkerSigner: mockSigner, GetStateTree: makeExplodingGetStateTree(st), GetWeight: getWeightTest, GetAncestors: getAncestors, Election: &consensus.FakeElectionMachine{}, TicketGen: &consensus.FakeTicketMachine{}, MessageSource: pool, Processor: consensus.NewDefaultProcessor(), Blockstore: bs, MessageStore: messages, Clock: th.NewFakeClock(time.Unix(1234567890, 0)), }) // This is actually okay and should result in a receipt msg := types.NewMeteredMessage(addrs[0], addrs[1], 0, types.ZeroAttoFIL, "", nil, types.NewGasPrice(0), types.NewGasUnits(0)) smsg, err := types.NewSignedMessage(*msg, &mockSigner) require.NoError(t, err) _, err = pool.Add(ctx, smsg, 0) require.NoError(t, err) assert.Len(t, pool.Pending(), 1) baseBlock := block.Block{ Parents: block.NewTipSetKey(newCid()), Height: types.Uint64(100), StateRoot: newCid(), ElectionProof: consensus.MakeFakeElectionProofForTest(), } baseTipSet := th.RequireNewTipSet(t, &baseBlock) blk, err := worker.Generate(ctx, baseTipSet, []block.Ticket{{VRFProof: []byte{0}}}, consensus.MakeFakeElectionProofForTest(), 0) assert.Error(t, err, "boom") assert.Nil(t, blk) assert.Len(t, pool.Pending(), 1) // No messages are removed from the pool. } type stateTreeForTest struct { state.Tree TestFlush func(ctx context.Context) (cid.Cid, error) } func wrapStateTreeForTest(st state.Tree) *stateTreeForTest { stt := stateTreeForTest{ st, st.Flush, } return &stt } func (st *stateTreeForTest) Flush(ctx context.Context) (cid.Cid, error) { return st.TestFlush(ctx) } func getWeightTest(_ context.Context, ts block.TipSet) (uint64, error) { w, err := ts.ParentWeight() if err != nil { return uint64(0), err } // consensus.ecV = 10 return w + uint64(ts.Len())*10, nil } func makeExplodingGetStateTree(st state.Tree) func(context.Context, block.TipSet) (state.Tree, error) { return func(c context.Context, ts block.TipSet) (state.Tree, error) { stt := wrapStateTreeForTest(st) stt.TestFlush = func(ctx context.Context) (cid.Cid, error) { return cid.Undef, errors.New("boom no flush") } return stt, nil } } func setupSigner() (types.MockSigner, address.Address) { mockSigner, _ := types.NewMockSignersAndKeyInfo(10) signerAddr := mockSigner.Addresses[len(mockSigner.Addresses)-1] return mockSigner, signerAddr }
1
21,912
Does anyone still use the `cst` returned from this method? If not, consider deleting it.
filecoin-project-venus
go
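For context on the question above: in the tests in this file, the `cst` value returned by `sharedSetup` is still live, because each worker setup threads it straight into the message store, e.g. (excerpted from the code above):

    st, pool, addrs, cst, bs := sharedSetup(t, mockSignerVal)
    msgStore := chain.NewMessageStore(cst)

So the return value could only be deleted once those call sites obtain their message store some other way; this is an observation from the visible code only, not a statement about other callers elsewhere in the repo.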
@@ -9,6 +9,7 @@ get( "/getting-started-with-ios-development?utm_source=podcast" ) ) +get "/videos/vim-for-rails-developers" => redirect("https://www.youtube.com/watch?v=9J2OjH8Ao_A") get "/humans-present/oss" => redirect( "https://www.youtube.com/watch?v=VMBhumlUP-A") get "/ios-on-rails" => redirect("https://gumroad.com/l/ios-on-rails") get "/ios-on-rails-beta" => redirect("https://gumroad.com/l/ios-on-rails")
1
get "/5by5" => redirect("/design-for-developers?utm_source=5by5") get "/:id/articles" => redirect("http://robots.thoughtbot.com/tags/%{id}") get "/backbone-js-on-rails" => redirect("https://gumroad.com/l/backbone-js-on-rails") get "/courses/:id" => redirect("/%{id}") get "/d4d-resources" => redirect("/design-for-developers-resources") get "/geocoding-on-rails" => redirect("https://gumroad.com/l/geocoding-on-rails") get( "/gettingstartedwithios" => redirect( "/getting-started-with-ios-development?utm_source=podcast" ) ) get "/humans-present/oss" => redirect( "https://www.youtube.com/watch?v=VMBhumlUP-A") get "/ios-on-rails" => redirect("https://gumroad.com/l/ios-on-rails") get "/ios-on-rails-beta" => redirect("https://gumroad.com/l/ios-on-rails") get "/live" => redirect("http://forum.upcase.com") get "/pages/tmux" => redirect("https://www.youtube.com/watch?v=CKC8Ph-s2F4") get "/prime" => redirect("/") get "/subscribe" => redirect("/") get "/products/:id/purchases/:lookup" => redirect("/purchases/%{lookup}") get "/ruby-science" => redirect("https://gumroad.com/l/ruby-science") get "/workshops/:id" => redirect("/%{id}") get "/dashboard" => redirect("/practice") get "/test-driven+development" => redirect("/testing") get "/test-driven+development/resources" => redirect("/testing/resources") get "/clean+code" => redirect("/clean-code") get "/clean+code/resources" => redirect("/clean-code/resources") get "/ruby" => redirect("/rails") get "/rubymotion" => redirect("/ios") get "/swift" => redirect("/ios") get "/git" => redirect("/workflow") get "/heroku" => redirect("/workflow") get "/sql" => redirect("/workflow") get "/unix" => redirect("/workflow") get "/typography" => redirect("/design") get "/visual-principles" => redirect("/design") get "/web+design" => redirect("/design") get "/grids" => redirect("/design") get "/html-css" => redirect("/design") get "/sass" => redirect("/design") get "/products" => redirect("/practice") if Rails.env.staging? || Rails.env.production? get( "/products/:id" => redirect("/test-driven-rails"), constraints: { id: /(10|12).*/ } ) get( "/products/:id" => redirect("/design-for-developers"), constraints: { id: /(9|11).*/ } ) get( "/products/:id" => redirect("https://www.youtube.com/watch?v=CKC8Ph-s2F4"), constraints: { id: /(4).*/ } ) get "/products/14" => redirect("/prime") get "/products/14-prime" => redirect("/prime") end
1
15,166
Line is too long. [97/80]
thoughtbot-upcase
rb
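A sketch of one way to satisfy the 80-column limit RuboCop is flagging, reusing the multi-line get(...) wrapping style this routes file already applies to the podcast redirect:

    get(
      "/videos/vim-for-rails-developers" => redirect(
        "https://www.youtube.com/watch?v=9J2OjH8Ao_A"
      )
    )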
@@ -1,12 +1,14 @@ """ Testing for data_transfer.py """ ### Python imports +from io import BytesIO import pathlib from unittest import mock ### Third-party imports from botocore.stub import ANY +from botocore.exceptions import ReadTimeoutError import pandas as pd import pytest
1
""" Testing for data_transfer.py """ ### Python imports import pathlib from unittest import mock ### Third-party imports from botocore.stub import ANY import pandas as pd import pytest ### Project imports from quilt3 import data_transfer from quilt3.util import PhysicalKey from .utils import QuiltTestCase ### Code # parquet test moved to test_formats.py DATA_DIR = pathlib.Path(__file__).parent / 'data' class DataTransferTest(QuiltTestCase): def test_select(self): # Note: The boto3 Stubber doesn't work properly with s3_client.select_object_content(). # The return value expects a dict where an iterable is in the actual results. chunks = [ b'{"foo": ', b'9, "b', b'ar": 3', b'}\n{"foo"', b': 9, "bar": 1}\n{"foo": 6, "bar": 9}\n{"foo":', b' 1, "bar": 7}\n{"foo":', b' 6, "bar": 1}\n{"foo": 6, "bar": 6}', b'\n{"foo": 9, "bar": 6}', b'\n{"foo": 6, "bar": 4}\n', b'{"foo": 2, "bar": 0}', b'\n{"foo": 2, "bar": 0}\n', ] records = [{'Records': {'Payload': chunk}} for chunk in chunks] # noinspection PyTypeChecker records.append({'Stats': { 'BytesScanned': 100, 'BytesProcessed': 100, 'BytesReturned': 210, }}) records.append({'End': {}}) expected_result = pd.DataFrame.from_records([ {'foo': 9, 'bar': 3}, {'foo': 9, 'bar': 1}, {'foo': 6, 'bar': 9}, {'foo': 1, 'bar': 7}, {'foo': 6, 'bar': 1}, {'foo': 6, 'bar': 6}, {'foo': 9, 'bar': 6}, {'foo': 6, 'bar': 4}, {'foo': 2, 'bar': 0}, {'foo': 2, 'bar': 0}, ]) # test normal use from extension expected_args = { 'Bucket': 'foo', 'Key': 'bar/baz.json', 'Expression': 'select * from S3Object', 'ExpressionType': 'SQL', 'InputSerialization': { 'CompressionType': 'NONE', 'JSON': {'Type': 'DOCUMENT'} }, 'OutputSerialization': {'JSON': {}}, } boto_return_val = {'Payload': iter(records)} with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched: result = data_transfer.select(PhysicalKey.from_url('s3://foo/bar/baz.json'), 'select * from S3Object') patched.assert_called_once_with(**expected_args) assert result.equals(expected_result) with mock.patch.object(self.s3_client, 'select_object_content'): # No format determined. with pytest.raises(data_transfer.QuiltException): result = data_transfer.select(PhysicalKey.from_url('s3://foo/bar/baz'), 'select * from S3Object') # test format-specified in metadata expected_args = { 'Bucket': 'foo', 'Key': 'bar/baz', 'Expression': 'select * from S3Object', 'ExpressionType': 'SQL', 'InputSerialization': { 'CompressionType': 'NONE', 'JSON': {'Type': 'DOCUMENT'} }, 'OutputSerialization': {'JSON': {}}, } boto_return_val = {'Payload': iter(records)} with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched: result = data_transfer.select(PhysicalKey.from_url('s3://foo/bar/baz'), 'select * from S3Object', meta={'target': 'json'}) assert result.equals(expected_result) patched.assert_called_once_with(**expected_args) # test compression is specified expected_args = { 'Bucket': 'foo', 'Key': 'bar/baz.json.gz', 'Expression': 'select * from S3Object', 'ExpressionType': 'SQL', 'InputSerialization': { 'CompressionType': 'GZIP', 'JSON': {'Type': 'DOCUMENT'} }, 'OutputSerialization': {'JSON': {}}, } boto_return_val = {'Payload': iter(records)} with mock.patch.object(self.s3_client, 'select_object_content', return_value=boto_return_val) as patched: # result ignored -- returned data isn't compressed, and this has already been tested. 
data_transfer.select(PhysicalKey.from_url('s3://foo/bar/baz.json.gz'), 'select * from S3Object') patched.assert_called_once_with(**expected_args) def test_get_size_and_version(self): response = { 'ETag': '12345', 'VersionId': '1.0', 'ContentLength': 123, } expected_params = { 'Bucket': 'my_bucket', 'Key': 'my_obj', } self.s3_stubber.add_response('head_object', response, expected_params) # Verify the verion is present assert data_transfer.get_size_and_version(PhysicalKey.from_url('s3://my_bucket/my_obj'))[1] == '1.0' def test_list_local_url(self): dir_path = DATA_DIR / 'dir' contents = set(list(data_transfer.list_url(PhysicalKey.from_path(dir_path)))) assert contents == set([ ('foo.txt', 4), ('x/blah.txt', 6) ]) def test_etag(self): assert data_transfer._calculate_etag(DATA_DIR / 'small_file.csv') == '"0bec5bf6f93c547bc9c6774acaf85e1a"' assert data_transfer._calculate_etag(DATA_DIR / 'buggy_parquet.parquet') == '"dfb5aca048931d396f4534395617363f"' def test_simple_upload(self): path = DATA_DIR / 'small_file.csv' # Unversioned bucket self.s3_stubber.add_response( method='put_object', service_response={ }, expected_params={ 'Body': ANY, 'Bucket': 'example', 'Key': 'foo.csv', } ) data_transfer.copy_file(PhysicalKey.from_path(path), PhysicalKey.from_url('s3://example/foo.csv')) def test_multi_upload(self): path1 = DATA_DIR / 'small_file.csv' path2 = DATA_DIR / 'dir/foo.txt' # Unversioned bucket self.s3_stubber.add_response( method='put_object', service_response={ }, expected_params={ 'Body': ANY, 'Bucket': 'example1', 'Key': 'foo.csv', } ) # Versioned bucket self.s3_stubber.add_response( method='put_object', service_response={ 'VersionId': 'v123' }, expected_params={ 'Body': ANY, 'Bucket': 'example2', 'Key': 'foo.txt', } ) # stubber expects responses in order, so disable multi-threading. 
with mock.patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1): urls = data_transfer.copy_file_list([ (PhysicalKey.from_path(path1), PhysicalKey.from_url('s3://example1/foo.csv'), path1.stat().st_size), (PhysicalKey.from_path(path2), PhysicalKey.from_url('s3://example2/foo.txt'), path2.stat().st_size), ]) assert urls[0] == PhysicalKey.from_url('s3://example1/foo.csv') assert urls[1] == PhysicalKey.from_url('s3://example2/foo.txt?versionId=v123') @pytest.mark.skip(reason="Broken due to S3ClientProvider") def test_upload_large_file(self): path = DATA_DIR / 'large_file.npy' self.s3_stubber.add_client_error( method='head_object', http_status_code=404, expected_params={ 'Bucket': 'example', 'Key': 'large_file.npy', } ) self.s3_stubber.add_response( method='put_object', service_response={ 'VersionId': 'v1' }, expected_params={ 'Body': ANY, 'Bucket': 'example', 'Key': 'large_file.npy', } ) urls = data_transfer.copy_file_list([ (PhysicalKey.from_path(path), PhysicalKey.from_url('s3://example/large_file.npy'), path.stat().st_size), ]) assert urls[0] == PhysicalKey.from_url('s3://example/large_file.npy?versionId=v1') def test_upload_large_file_etag_match(self): path = DATA_DIR / 'large_file.npy' self.s3_stubber.add_response( method='head_object', service_response={ 'ContentLength': path.stat().st_size, 'ETag': data_transfer._calculate_etag(path), 'VersionId': 'v1', }, expected_params={ 'Bucket': 'example', 'Key': 'large_file.npy', } ) urls = data_transfer.copy_file_list([ (PhysicalKey.from_path(path), PhysicalKey.from_url('s3://example/large_file.npy'), path.stat().st_size), ]) assert urls[0] == PhysicalKey.from_url('s3://example/large_file.npy?versionId=v1') def test_upload_large_file_etag_mismatch(self): path = DATA_DIR / 'large_file.npy' self.s3_stubber.add_response( method='head_object', service_response={ 'ContentLength': path.stat().st_size, 'ETag': '"123"', 'VersionId': 'v1', }, expected_params={ 'Bucket': 'example', 'Key': 'large_file.npy', } ) self.s3_stubber.add_response( method='put_object', service_response={ 'VersionId': 'v2' }, expected_params={ 'Body': ANY, 'Bucket': 'example', 'Key': 'large_file.npy', } ) urls = data_transfer.copy_file_list([ (PhysicalKey.from_path(path), PhysicalKey.from_url('s3://example/large_file.npy'), path.stat().st_size), ]) assert urls[0] == PhysicalKey.from_url('s3://example/large_file.npy?versionId=v2') @pytest.mark.skip(reason="Broken due to S3ClientProvider") def test_multipart_upload(self): name = 'very_large_file.bin' path = pathlib.Path(name) size = 30 * 1024 * 1024 chunksize = 8 * 1024 * 1024 chunks = -(-size // chunksize) # Create an empty 30MB file; shouldn't take up any actual space on any reasonable filesystem. 
with open(path, 'wb') as fd: fd.seek(size - 1) fd.write(b'!') self.s3_stubber.add_client_error( method='head_object', http_status_code=404, expected_params={ 'Bucket': 'example', 'Key': name, } ) self.s3_stubber.add_response( method='create_multipart_upload', service_response={ 'UploadId': '123' }, expected_params={ 'Bucket': 'example', 'Key': name, } ) for part_num in range(1, chunks+1): self.s3_stubber.add_response( method='upload_part', service_response={ 'ETag': 'etag%d' % part_num }, expected_params={ 'Bucket': 'example', 'Key': name, 'UploadId': '123', 'Body': ANY, 'PartNumber': part_num } ) self.s3_stubber.add_response( method='complete_multipart_upload', service_response={}, expected_params={ 'Bucket': 'example', 'Key': name, 'UploadId': '123', 'MultipartUpload': { 'Parts': [{ 'ETag': 'etag%d' % i, 'PartNumber': i } for i in range(1, chunks+1)] } } ) with mock.patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1): data_transfer.copy_file_list([ (PhysicalKey.from_path(path), PhysicalKey.from_url(f's3://example/{name}'), path.stat().st_size), ]) def test_multipart_copy(self): size = 100 * 1024 * 1024 * 1024 # size / 8MB would give us 12501 chunks - but the maximum allowed is 10000, # so we should end with 16MB chunks instead. chunksize = 8 * 1024 * 1024 assert size / chunksize > 10000 chunksize *= 2 chunks = -(-size // chunksize) assert chunks <= 10000 self.s3_stubber.add_response( method='create_multipart_upload', service_response={ 'UploadId': '123' }, expected_params={ 'Bucket': 'example2', 'Key': 'large_file2.npy', } ) for part_num in range(1, chunks+1): self.s3_stubber.add_response( method='upload_part_copy', service_response={ 'CopyPartResult': { 'ETag': 'etag%d' % part_num } }, expected_params={ 'Bucket': 'example2', 'Key': 'large_file2.npy', 'UploadId': '123', 'PartNumber': part_num, 'CopySource': { 'Bucket': 'example1', 'Key': 'large_file1.npy' }, 'CopySourceRange': 'bytes=%d-%d' % ( (part_num-1) * chunksize, min(part_num * chunksize, size) - 1 ) } ) self.s3_stubber.add_response( method='complete_multipart_upload', service_response={}, expected_params={ 'Bucket': 'example2', 'Key': 'large_file2.npy', 'UploadId': '123', 'MultipartUpload': { 'Parts': [{ 'ETag': 'etag%d' % i, 'PartNumber': i } for i in range(1, chunks+1)] } } ) with mock.patch('quilt3.data_transfer.s3_transfer_config.max_request_concurrency', 1): data_transfer.copy_file_list([ (PhysicalKey.from_url('s3://example1/large_file1.npy'), PhysicalKey.from_url('s3://example2/large_file2.npy'), size), ])
1
18,320
This seems unused.
quiltdata-quilt
py
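A hedged sketch of the fix the comment implies: one of the two imports the diff adds is never referenced in the test code shown. Which one is actually dead depends on parts of the patch not visible here, so the choice below is an assumption; if `ReadTimeoutError` were the unused addition, the import block would simply shrink back to:

    ### Python imports
    from io import BytesIO
    import pathlib
    from unittest import mock

    ### Third-party imports
    from botocore.stub import ANY  # ReadTimeoutError import dropped, assuming it is the unused one
    import pandas as pd
    import pytest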
@@ -184,8 +184,8 @@ func GetOSInterface() types.OSTypeInstaller { case CentOSType: return &CentOS{} default: + panic("unsupport os-release") } - return nil } //IsKubeEdgeController identifies if the node is having edge controller and k8s api-server already running.
1
/* Copyright 2019 The Kubeedge Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package util import ( "bytes" "fmt" "io" "os" "os/exec" "strings" "sync" "github.com/spf13/pflag" types "github.com/kubeedge/kubeedge/keadm/app/cmd/common" ) //Constants used by installers const ( UbuntuOSType = "ubuntu" CentOSType = "centos" DefaultDownloadURL = "https://download.docker.com" DockerPreqReqList = "apt-transport-https ca-certificates curl gnupg-agent software-properties-common" KubernetesDownloadURL = "https://apt.kubernetes.io/" KubernetesGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg" KubeEdgeDownloadURL = "https://github.com/kubeedge/kubeedge/releases/download" KubeEdgePath = "/etc/kubeedge/" KubeEdgeConfPath = KubeEdgePath + "kubeedge/edge/conf" KubeEdgeBinaryName = "edge_core" KubeEdgeDefaultCertPath = KubeEdgePath + "certs/" KubeEdgeConfigEdgeYaml = KubeEdgeConfPath + "/edge.yaml" KubeEdgeConfigNodeJSON = KubeEdgeConfPath + "/node.json" KubeEdgeConfigLoggingYaml = KubeEdgeConfPath + "/logging.yaml" KubeEdgeConfigModulesYaml = KubeEdgeConfPath + "/modules.yaml" KubeEdgeCloudCertGenPath = KubeEdgePath + "certgen.sh" KubeEdgeEdgeCertsTarFileName = "certs.tgz" KubeEdgeEdgeCertsTarFilePath = KubeEdgePath + "certs.tgz" KubeEdgeCloudConfPath = KubeEdgePath + "kubeedge/cloud/conf" KubeEdgeControllerYaml = KubeEdgeCloudConfPath + "/controller.yaml" KubeEdgeControllerLoggingYaml = KubeEdgeCloudConfPath + "/logging.yaml" KubeEdgeControllerModulesYaml = KubeEdgeCloudConfPath + "/modules.yaml" KubeCloudBinaryName = "edgecontroller" KubeCloudApiserverYamlPath = "/etc/kubernetes/manifests/kube-apiserver.yaml" KubeCloudReplaceIndex = 25 KubeCloudReplaceString = " - --insecure-bind-address=0.0.0.0\n" KubeAPIServerName = "kube-apiserver" KubeEdgeHTTPProto = "http" KubeEdgeHTTPSProto = "https" KubeEdgeHTTPPort = "8080" KubeEdgeHTTPSPort = "6443" KubeEdgeHTTPRequestTimeout = 30 ) //AddToolVals gets the value and default values of each flags and collects them in temporary cache func AddToolVals(f *pflag.Flag, flagData map[string]types.FlagData) { flagData[f.Name] = types.FlagData{Val: f.Value.String(), DefVal: f.DefValue} } //CheckIfAvailable checks is val of a flag is empty then return the default value func CheckIfAvailable(val, defval string) string { if val == "" { return defval } return val } //Common struct contains OS and Tool version properties and also embeds OS interface type Common struct { types.OSTypeInstaller OSVersion string ToolVersion string KubeConfig string } //SetOSInterface defines a method to set the implemtation of the OS interface func (co *Common) SetOSInterface(intf types.OSTypeInstaller) { co.OSTypeInstaller = intf } //Command defines commands to be executed and captures std out and std error type Command struct { Cmd *exec.Cmd StdOut []byte StdErr []byte } //ExecuteCommand executes the command and captures the output in stdOut func (cm *Command) ExecuteCommand() { var err error cm.StdOut, err = cm.Cmd.Output() if err != nil { fmt.Println("Output failed: ", err) cm.StdErr = []byte(err.Error()) } 
} //GetStdOutput gets StdOut field func (cm Command) GetStdOutput() string { if len(cm.StdOut) != 0 { return strings.TrimRight(string(cm.StdOut), "\n") } return "" } //GetStdErr gets StdErr field func (cm Command) GetStdErr() string { if len(cm.StdErr) != 0 { return strings.TrimRight(string(cm.StdErr), "\n") } return "" } //ExecuteCmdShowOutput captures both StdOut and StdErr after exec.cmd(). //It helps in the commands where it takes some time for execution. func (cm Command) ExecuteCmdShowOutput() error { var stdoutBuf, stderrBuf bytes.Buffer stdoutIn, _ := cm.Cmd.StdoutPipe() stderrIn, _ := cm.Cmd.StderrPipe() var errStdout, errStderr error stdout := io.MultiWriter(os.Stdout, &stdoutBuf) stderr := io.MultiWriter(os.Stderr, &stderrBuf) err := cm.Cmd.Start() if err != nil { return fmt.Errorf("failed to start '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error()) } var wg sync.WaitGroup wg.Add(1) go func() { _, errStdout = io.Copy(stdout, stdoutIn) wg.Done() }() _, errStderr = io.Copy(stderr, stderrIn) wg.Wait() err = cm.Cmd.Wait() if err != nil { return fmt.Errorf("failed to run '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error()) } if errStdout != nil || errStderr != nil { return fmt.Errorf("failed to capture stdout or stderr") } cm.StdOut, cm.StdErr = stdoutBuf.Bytes(), stderrBuf.Bytes() return nil } //GetOSVersion gets the OS name func GetOSVersion() string { c := &Command{Cmd: exec.Command("sh", "-c", ". /etc/os-release && echo $ID")} c.ExecuteCommand() return c.GetStdOutput() } //GetOSInterface helps in returning OS specific object which implements OSTypeInstaller interface. func GetOSInterface() types.OSTypeInstaller { switch GetOSVersion() { case UbuntuOSType: return &UbuntuOS{} case CentOSType: return &CentOS{} default: } return nil } //IsKubeEdgeController identifies if the node is having edge controller and k8s api-server already running. //If so, then return true, else it can used as edge node and initialise it. func IsKubeEdgeController() (types.ModuleRunning, error) { osType := GetOSInterface() edgeControllerRunning, err := osType.IsKubeEdgeProcessRunning(KubeCloudBinaryName) if err != nil { return types.NoneRunning, err } apiServerRunning, err := osType.IsKubeEdgeProcessRunning(KubeAPIServerName) if err != nil { return types.NoneRunning, err } //If any of edgecontroller or K8S API server is running, then we believe the node is cloud node if edgeControllerRunning || apiServerRunning { return types.KubeEdgeCloudRunning, nil } edgeCoreRunning, err := osType.IsKubeEdgeProcessRunning(KubeEdgeBinaryName) if err != nil { return types.NoneRunning, err } if false != edgeCoreRunning { return types.KubeEdgeEdgeRunning, nil } return types.NoneRunning, nil }
1
12,834
@luguanglong, thanks for the fix. Could you rephrase the panic message to read better, something like "This OS version is currently unsupported by keadm"?
kubeedge-kubeedge
go
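Applying the reviewer's suggested wording, the patched switch from the diff would read as follows (a sketch of the proposed change, not the merged code):

    //GetOSInterface helps in returning OS specific object which implements OSTypeInstaller interface.
    func GetOSInterface() types.OSTypeInstaller {
    	switch GetOSVersion() {
    	case UbuntuOSType:
    		return &UbuntuOS{}
    	case CentOSType:
    		return &CentOS{}
    	default:
    		// Message reworded per the review, replacing "unsupport os-release".
    		panic("This OS version is currently unsupported by keadm")
    	}
    }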
@@ -53,6 +53,10 @@ from rdkit.Chem.Draw import rdMolDraw2D from rdkit.Chem import rdDepictor from rdkit.Chem import rdMolDescriptors as rdMD +def _CleanFpInfoAttr_(mol): + if hasattr(mol, '_fpInfo'): + delattr(mol, '_fpInfo') + def GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity): """
1
# # Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc. # Copyright (c) 2021, Greg Landrum # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of Novartis Institutes for BioMedical Research Inc. # nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Created by Sereina Riniker, Aug 2013 import copy import math from numpy.lib.arraysetops import isin try: from matplotlib import cm from matplotlib.colors import LinearSegmentedColormap except ImportError: cm = None except RuntimeError: cm = None import numpy from rdkit import Chem from rdkit import DataStructs from rdkit import Geometry from rdkit.Chem import Draw from rdkit.Chem.Draw import rdMolDraw2D from rdkit.Chem import rdDepictor from rdkit.Chem import rdMolDescriptors as rdMD def GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity): """ Calculates the atomic weights for the probe molecule based on a fingerprint function and a metric. Parameters: refMol -- the reference molecule probeMol -- the probe molecule fpFunction -- the fingerprint function metric -- the similarity metric Note: If fpFunction needs additional parameters, use a lambda construct """ if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo') if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo') refFP = fpFunction(refMol, -1) probeFP = fpFunction(probeMol, -1) baseSimilarity = metric(refFP, probeFP) # loop over atoms weights = [] for atomId in range(probeMol.GetNumAtoms()): newFP = fpFunction(probeMol, atomId) newSimilarity = metric(refFP, newFP) weights.append(baseSimilarity - newSimilarity) if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo') if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo') return weights def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction): """ Calculates the atomic weights for the probe molecule based on a fingerprint function and the prediction function of a ML model. 
Parameters: probeMol -- the probe molecule fpFunction -- the fingerprint function predictionFunction -- the prediction function of the ML model """ if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo') probeFP = fpFunction(probeMol, -1) baseProba = predictionFunction(probeFP) # loop over atoms weights = [] for atomId in range(probeMol.GetNumAtoms()): newFP = fpFunction(probeMol, atomId) newProba = predictionFunction(newFP) weights.append(baseProba - newProba) if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo') return weights def GetStandardizedWeights(weights): """ Normalizes the weights, such that the absolute maximum weight equals 1.0. Parameters: weights -- the list with the atomic weights """ tmp = [math.fabs(w) for w in weights] currentMax = max(tmp) if currentMax > 0: return [w / currentMax for w in weights], currentMax else: return weights, currentMax def GetSimilarityMapFromWeights(mol, weights, colorMap=None, scale=-1, size=(250, 250), sigma=None, coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, draw2d=None, **kwargs): """ Generates the similarity map for a molecule given the atomic weights. Parameters: mol -- the molecule of interest colorMap -- the matplotlib color map scheme, default is custom PiWG color map scale -- the scaling: scale < 0 -> the absolute maximum weight is used as maximum scale scale = double -> this is the maximum scale size -- the size of the figure sigma -- the sigma for the Gaussians coordScale -- scaling factor for the coordinates step -- the step for calcAtomGaussian colors -- color of the contour lines contourLines -- if integer number N: N contour lines are drawn if list(numbers): contour lines at these numbers are drawn alpha -- the alpha blending value for the contour lines kwargs -- additional arguments for drawing """ if mol.GetNumAtoms() < 2: raise ValueError("too few atoms") if draw2d is not None: mol = rdMolDraw2D.PrepareMolForDrawing(mol, addChiralHs=False) if not mol.GetNumConformers(): rdDepictor.Compute2DCoords(mol) if sigma is None: if mol.GetNumBonds() > 0: bond = mol.GetBondWithIdx(0) idx1 = bond.GetBeginAtomIdx() idx2 = bond.GetEndAtomIdx() sigma = 0.3 * (mol.GetConformer().GetAtomPosition(idx1) - mol.GetConformer().GetAtomPosition(idx2)).Length() else: sigma = 0.3 * (mol.GetConformer().GetAtomPosition(0) - mol.GetConformer().GetAtomPosition(1)).Length() sigma = round(sigma, 2) sigmas = [sigma] * mol.GetNumAtoms() locs = [] for i in range(mol.GetNumAtoms()): p = mol.GetConformer().GetAtomPosition(i) locs.append(Geometry.Point2D(p.x, p.y)) draw2d.ClearDrawing() ps = Draw.ContourParams() ps.fillGrid = True ps.gridResolution = 0.1 ps.extraGridPadding = 0.5 if colorMap is not None: if cm is not None and isinstance(colorMap, type(cm.Blues)): # it's a matplotlib colormap: clrs = [tuple(x) for x in colorMap([0, 0.5, 1])] else: clrs = [colorMap[0], colorMap[1], colorMap[2]] ps.setColourMap(clrs) Draw.ContourAndDrawGaussians(draw2d, locs, weights, sigmas, nContours=contourLines, params=ps) draw2d.drawOptions().clearBackground = False draw2d.DrawMolecule(mol) return draw2d fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs) if sigma is None: if mol.GetNumBonds() > 0: bond = mol.GetBondWithIdx(0) idx1 = bond.GetBeginAtomIdx() idx2 = bond.GetEndAtomIdx() sigma = 0.3 * math.sqrt( sum([(mol._atomPs[idx1][i] - mol._atomPs[idx2][i])**2 for i in range(2)])) else: sigma = 0.3 * \ math.sqrt(sum([(mol._atomPs[0][i] - mol._atomPs[1][i])**2 for i in range(2)])) sigma = round(sigma, 2) x, y, z = 
Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step) # scaling if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z))) else: maxScale = scale # coloring if colorMap is None: if cm is None: raise RuntimeError("matplotlib failed to import") PiYG_cmap = cm.get_cmap('PiYG', 2) colorMap = LinearSegmentedColormap.from_list( 'PiWG', [PiYG_cmap(0), (1.0, 1.0, 1.0), PiYG_cmap(1)], N=255) fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0, 1, 0, 1), vmin=-maxScale, vmax=maxScale) # contour lines # only draw them when at least one weight is not zero if len([w for w in weights if w != 0.0]): contourset = fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs) for j, c in enumerate(contourset.collections): if contourset.levels[j] == 0.0: c.set_linewidth(0.0) elif contourset.levels[j] < 0: c.set_dashes([(0, (3.0, 3.0))]) fig.axes[0].set_axis_off() return fig def GetSimilarityMapForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity, **kwargs): """ Generates the similarity map for a given reference and probe molecule, fingerprint function and similarity metric. Parameters: refMol -- the reference molecule probeMol -- the probe molecule fpFunction -- the fingerprint function metric -- the similarity metric. kwargs -- additional arguments for drawing """ weights = GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric) weights, maxWeight = GetStandardizedWeights(weights) fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs) return fig, maxWeight def GetSimilarityMapForModel(probeMol, fpFunction, predictionFunction, **kwargs): """ Generates the similarity map for a given ML model and probe molecule, and fingerprint function. Parameters: probeMol -- the probe molecule fpFunction -- the fingerprint function predictionFunction -- the prediction function of the ML model kwargs -- additional arguments for drawing """ weights = GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction) weights, maxWeight = GetStandardizedWeights(weights) fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs) return fig, maxWeight apDict = {} apDict['normal'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetAtomPairFingerprint( m, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs) apDict['hashed'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprint( m, nBits=bits, minLength=minl, maxLength=maxl, ignoreAtoms=ia, **kwargs) apDict[ 'bv'] = lambda m, bits, minl, maxl, bpe, ia, **kwargs: rdMD.GetHashedAtomPairFingerprintAsBitVect( m, nBits=bits, minLength=minl, maxLength=maxl, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs) # usage: lambda m,i: GetAPFingerprint(m, i, fpType, nBits, minLength, maxLength, nBitsPerEntry) def GetAPFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, minLength=1, maxLength=30, nBitsPerEntry=4, **kwargs): """ Calculates the atom pairs fingerprint with the torsions of atomId removed. 
Parameters: mol -- the molecule of interest atomId -- the atom to remove the pairs for (if -1, no pair is removed) fpType -- the type of AP fingerprint ('normal', 'hashed', 'bv') nBits -- the size of the bit vector (only for fpType='bv') minLength -- the minimum path length for an atom pair maxLength -- the maxmimum path length for an atom pair nBitsPerEntry -- the number of bits available for each pair """ if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Atom pairs fingerprint type") if atomId < 0: return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, 0, **kwargs) if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms") return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, [atomId], **kwargs) ttDict = {} ttDict['normal'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetTopologicalTorsionFingerprint( m, targetSize=ts, ignoreAtoms=ia, **kwargs) ttDict[ 'hashed'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprint( m, nBits=bits, targetSize=ts, ignoreAtoms=ia, **kwargs) ttDict[ 'bv'] = lambda m, bits, ts, bpe, ia, **kwargs: rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect( m, nBits=bits, targetSize=ts, nBitsPerEntry=bpe, ignoreAtoms=ia, **kwargs) # usage: lambda m,i: GetTTFingerprint(m, i, fpType, nBits, targetSize) def GetTTFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, targetSize=4, nBitsPerEntry=4, **kwargs): """ Calculates the topological torsion fingerprint with the pairs of atomId removed. Parameters: mol -- the molecule of interest atomId -- the atom to remove the torsions for (if -1, no torsion is removed) fpType -- the type of TT fingerprint ('normal', 'hashed', 'bv') nBits -- the size of the bit vector (only for fpType='bv') minLength -- the minimum path length for an atom pair maxLength -- the maxmimum path length for an atom pair nBitsPerEntry -- the number of bits available for each torsion any additional keyword arguments will be passed to the fingerprinting function. """ if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Topological torsion fingerprint type") if atomId < 0: return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, 0, **kwargs) if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms") return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, [atomId], **kwargs) # usage: lambda m,i: GetMorganFingerprint(m, i, radius, fpType, nBits, useFeatures) def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False, **kwargs): """ Calculates the Morgan fingerprint with the environments of atomId removed. Parameters: mol -- the molecule of interest radius -- the maximum radius fpType -- the type of Morgan fingerprint: 'count' or 'bv' atomId -- the atom to remove the environments for (if -1, no environments is removed) nBits -- the size of the bit vector (only for fpType = 'bv') useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan any additional keyword arguments will be passed to the fingerprinting function. 
""" if fpType not in ['bv', 'count']: raise ValueError("Unknown Morgan fingerprint type") if not hasattr(mol, '_fpInfo'): info = {} # get the fingerprint if fpType == 'bv': molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures, bitInfo=info, **kwargs) else: molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info, **kwargs) # construct the bit map if fpType == 'bv': bitmap = [DataStructs.ExplicitBitVect(nBits) for _ in range(mol.GetNumAtoms())] else: bitmap = [[] for _ in range(mol.GetNumAtoms())] for bit, es in info.items(): for at1, rad in es: if rad == 0: # for radius 0 if fpType == 'bv': bitmap[at1][bit] = 1 else: bitmap[at1].append(bit) else: # for radii > 0 env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1) amap = {} Chem.PathToSubmol(mol, env, atomMap=amap) for at2 in amap.keys(): if fpType == 'bv': bitmap[at2][bit] = 1 else: bitmap[at2].append(bit) mol._fpInfo = (molFp, bitmap) if atomId < 0: return mol._fpInfo[0] else: # remove the bits of atomId if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms") if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set") if fpType == 'bv': molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor else: # count molFp = copy.deepcopy(mol._fpInfo[0]) # delete the bits with atomId for bit in mol._fpInfo[1][atomId]: molFp[bit] -= 1 return molFp # usage: lambda m,i: GetRDKFingerprint(m, i, fpType, nBits, minPath, maxPath, nBitsPerHash) def GetRDKFingerprint(mol, atomId=-1, fpType='bv', nBits=2048, minPath=1, maxPath=5, nBitsPerHash=2, **kwargs): """ Calculates the RDKit fingerprint with the paths of atomId removed. Parameters: mol -- the molecule of interest atomId -- the atom to remove the paths for (if -1, no path is removed) fpType -- the type of RDKit fingerprint: 'bv' nBits -- the size of the bit vector minPath -- minimum path length maxPath -- maximum path length nBitsPerHash -- number of to set per path """ if fpType not in ['bv', '']: raise ValueError("Unknown RDKit fingerprint type") fpType = 'bv' if not hasattr(mol, '_fpInfo'): info = [] # list with bits for each atom # get the fingerprint molFp = Chem.RDKFingerprint(mol, fpSize=nBits, minPath=minPath, maxPath=maxPath, nBitsPerHash=nBitsPerHash, atomBits=info, **kwargs) mol._fpInfo = (molFp, info) if atomId < 0: return mol._fpInfo[0] else: # remove the bits of atomId if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms") if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set") molFp = copy.deepcopy(mol._fpInfo[0]) molFp.UnSetBitsFromList(mol._fpInfo[1][atomId]) return molFp
1
24,000
This should probably be called `_DeleteFpInfoAttr`, because it removes the attribute; "cleaning" gives the impression it is still there afterwards. I would also move the call to the end of the function `GetAtomicWeightsForFingerprint`.
rdkit-rdkit
cpp
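A sketch of the rename and relocation the reviewer proposes: the helper's name states that the attribute is removed, and the repeated hasattr/delattr pairs in `GetAtomicWeightsForFingerprint` collapse into calls to it, with the final pair moved to the end of the function:

    def _DeleteFpInfoAttr(mol):
        # "Delete" rather than "clean": the _fpInfo attribute is removed outright.
        if hasattr(mol, '_fpInfo'):
            delattr(mol, '_fpInfo')

    # Inside GetAtomicWeightsForFingerprint:
    #     _DeleteFpInfoAttr(probeMol)
    #     _DeleteFpInfoAttr(refMol)
    # once before computing refFP/probeFP, and again at the end of the
    # function, just before the weights are returned.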
@@ -7101,6 +7101,13 @@ bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuf "(%zu) when pipeline layout was created", firstSet, setCount, pipeline_layout->set_layouts.size()); } + + static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { + std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"), + std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361"), + std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindDescriptorSets-pipelineBindPoint-00361")}; + skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); + return skip; }
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * Copyright (C) 2015-2021 Google Inc. * Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Cody Northrop <[email protected]> * Author: Michael Lentine <[email protected]> * Author: Tobin Ehlis <[email protected]> * Author: Chia-I Wu <[email protected]> * Author: Chris Forbes <[email protected]> * Author: Mark Lobodzinski <[email protected]> * Author: Ian Elliott <[email protected]> * Author: Dave Houlton <[email protected]> * Author: Dustin Graves <[email protected]> * Author: Jeremy Hayes <[email protected]> * Author: Jon Ashburn <[email protected]> * Author: Karl Schultz <[email protected]> * Author: Mark Young <[email protected]> * Author: Mike Schuchardt <[email protected]> * Author: Mike Weiblen <[email protected]> * Author: Tony Barbour <[email protected]> * Author: John Zulauf <[email protected]> * Author: Shannon McPherson <[email protected]> * Author: Jeremy Kniager <[email protected]> * Author: Tobias Hector <[email protected]> * Author: Jeremy Gebben <[email protected]> */ #include <algorithm> #include <array> #include <assert.h> #include <cmath> #include <fstream> #include <iostream> #include <list> #include <map> #include <memory> #include <mutex> #include <set> #include <sstream> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <string> #include <valarray> #include "vk_loader_platform.h" #include "vk_enum_string_helper.h" #include "chassis.h" #include "convert_to_renderpass2.h" #include "core_validation.h" #include "buffer_validation.h" #include "shader_validation.h" #include "vk_layer_utils.h" #include "sync_utils.h" #include "sync_vuid_maps.h" // these templates are defined in buffer_validation.cpp so we need to pull in the explicit instantiations from there extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier *barrier); extern template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const VkImageMemoryBarrier2KHR *barrier); extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; extern template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state, const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass, const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle, const VkImageMemoryBarrier2KHR &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const; using std::max; using std::string; using std::stringstream; using std::unique_ptr; using std::vector; void 
CoreChecks::AddInitialLayoutintoImageLayoutMap(const IMAGE_STATE &image_state, GlobalImageLayoutMap &image_layout_map) { auto *range_map = GetLayoutRangeMap(image_layout_map, image_state); auto range_gen = subresource_adapter::RangeGenerator(image_state.subresource_encoder, image_state.full_range); for (; range_gen->non_empty(); ++range_gen) { range_map->insert(range_map->end(), std::make_pair(*range_gen, image_state.createInfo.initialLayout)); } } // Override base class, we have some extra work to do here void CoreChecks::InitDeviceValidationObject(bool add_obj, ValidationObject *inst_obj, ValidationObject *dev_obj) { if (add_obj) { ValidationStateTracker::InitDeviceValidationObject(add_obj, inst_obj, dev_obj); } } // For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value. template <typename T1> bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object, const VulkanTypedHandle &typed_handle, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid<T1, SimpleErrorLocation>(mem_state, object, typed_handle, {api_name, error_code}); } template <typename T1, typename LocType> bool CoreChecks::VerifyBoundMemoryIsValid(const DEVICE_MEMORY_STATE *mem_state, const T1 object, const VulkanTypedHandle &typed_handle, const LocType &location) const { bool result = false; auto type_name = object_string[typed_handle.type]; if (!mem_state) { result |= LogError(object, location.Vuid(), "%s: %s used with no memory bound. Memory should be bound by calling vkBind%sMemory().", location.FuncName(), report_data->FormatHandle(typed_handle).c_str(), type_name + 2); } else if (mem_state->Destroyed()) { result |= LogError(object, location.Vuid(), "%s: %s used with no memory bound and previously bound memory was freed. 
Memory must not be freed " "prior to this operation.", location.FuncName(), report_data->FormatHandle(typed_handle).c_str()); } return result; } // Check to see if memory was ever bound to this image bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const Location &loc) const { using LocationAdapter = core_error::LocationVuidAdapter<sync_vuid_maps::GetImageBarrierVUIDFunctor>; return ValidateMemoryIsBoundToImage<LocationAdapter>(image_state, LocationAdapter(loc, sync_vuid_maps::ImageError::kNoMemory)); } bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const char *api_name, const char *error_code) const { return ValidateMemoryIsBoundToImage<SimpleErrorLocation>(image_state, SimpleErrorLocation(api_name, error_code)); } template <typename LocType> bool CoreChecks::ValidateMemoryIsBoundToImage(const IMAGE_STATE *image_state, const LocType &location) const { bool result = false; if (image_state->create_from_swapchain != VK_NULL_HANDLE) { if (image_state->bind_swapchain == VK_NULL_HANDLE) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); result |= LogError( objlist, location.Vuid(), "%s: %s is created by %s, and the image should be bound by calling vkBindImageMemory2(), and the pNext chain " "includes VkBindImageMemorySwapchainInfoKHR.", location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str()); } else if (image_state->create_from_swapchain != image_state->bind_swapchain) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); objlist.add(image_state->bind_swapchain); result |= LogError(objlist, location.Vuid(), "%s: %s is created by %s, but the image is bound by %s. 
The image should be created and bound by the same " "swapchain", location.FuncName(), report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(image_state->bind_swapchain).c_str()); } } else if (image_state->IsExternalAHB()) { // TODO look into how to properly check for a valid bound memory for an external AHB } else if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(image_state->MemState(), image_state->image(), image_state->Handle(), location); } return result; } // Check to see if memory was bound to this buffer bool CoreChecks::ValidateMemoryIsBoundToBuffer(const BUFFER_STATE *buffer_state, const char *api_name, const char *error_code) const { bool result = false; if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) { result |= VerifyBoundMemoryIsValid(buffer_state->MemState(), buffer_state->buffer(), buffer_state->Handle(), api_name, error_code); } return result; } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name, error_code); } // Check to see if memory was bound to this acceleration structure bool CoreChecks::ValidateMemoryIsBoundToAccelerationStructure(const ACCELERATION_STRUCTURE_STATE_KHR *as_state, const char *api_name, const char *error_code) const { return VerifyBoundMemoryIsValid(as_state->MemState(), as_state->acceleration_structure(), as_state->Handle(), api_name, error_code); } // Valid usage checks for a call to SetMemBinding(). // For NULL mem case, output warning // Make sure given object is in global object map // IF a previous binding existed, output validation error // Otherwise, add reference from objectInfo to memoryInfo // Add reference off of objInfo // TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions. 
bool CoreChecks::ValidateSetMemBinding(VkDeviceMemory mem, const VulkanTypedHandle &typed_handle, const char *apiName) const { bool skip = false; // It's an error to bind an object to NULL memory; that case is caught elsewhere, so only non-null bindings are checked here if (mem != VK_NULL_HANDLE) { const BINDABLE *mem_binding = ValidationStateTracker::GetObjectMemBinding(typed_handle); assert(mem_binding); if (mem_binding->sparse) { const char *error_code = nullptr; const char *handle_type = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { handle_type = "BUFFER"; if (strcmp(apiName, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-buffer-01030"; } else { error_code = "VUID-VkBindBufferMemoryInfo-buffer-01030"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { handle_type = "IMAGE"; if (strcmp(apiName, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-image-01045"; } else { error_code = "VUID-VkBindImageMemoryInfo-image-01045"; } } else { // Unsupported object type assert(false); } LogObjectList objlist(mem); objlist.add(typed_handle); skip |= LogError(objlist, error_code, "In %s, attempting to bind %s to %s which was created with sparse memory flags " "(VK_%s_CREATE_SPARSE_*_BIT).", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), handle_type); } const DEVICE_MEMORY_STATE *mem_info = ValidationStateTracker::GetDevMemState(mem); if (mem_info) { const DEVICE_MEMORY_STATE *prev_binding = mem_binding->MemState(); if (prev_binding) { if (!prev_binding->Destroyed()) { const char *error_code = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { if (strcmp(apiName, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-buffer-01029"; } else { error_code = "VUID-VkBindBufferMemoryInfo-buffer-01029"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { if (strcmp(apiName, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-image-01044"; } else { error_code = "VUID-VkBindImageMemoryInfo-image-01044"; } } else { // Unsupported object type assert(false); } LogObjectList objlist(mem); objlist.add(typed_handle); objlist.add(prev_binding->mem()); skip |= LogError(objlist, error_code, "In %s, attempting to bind %s to %s which has already been bound to %s.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str(), report_data->FormatHandle(prev_binding->mem()).c_str()); } else { LogObjectList objlist(mem); objlist.add(typed_handle); skip |= LogError(objlist, kVUID_Core_MemTrack_RebindObject, "In %s, attempting to bind %s to %s which was previously bound to memory that has " "since been freed.
Memory bindings are immutable in " "Vulkan so this attempt to bind to new memory is not allowed.", apiName, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(typed_handle).c_str()); } } } } return skip; } bool CoreChecks::ValidateDeviceQueueFamily(uint32_t queue_family, const char *cmd_name, const char *parameter_name, const char *error_code, bool optional = false) const { bool skip = false; if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) { skip |= LogError(device, error_code, "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.", cmd_name, parameter_name); } else if (queue_family_index_set.find(queue_family) == queue_family_index_set.end()) { skip |= LogError(device, error_code, "%s: %s (= %" PRIu32 ") is not one of the queue families given via VkDeviceQueueCreateInfo structures when the device was created.", cmd_name, parameter_name, queue_family); } return skip; } // Validate the specified queue families against the families supported by the physical device that owns this device bool CoreChecks::ValidatePhysicalDeviceQueueFamilies(uint32_t queue_family_count, const uint32_t *queue_families, const char *cmd_name, const char *array_parameter_name, const char *vuid) const { bool skip = false; if (queue_families) { layer_data::unordered_set<uint32_t> set; for (uint32_t i = 0; i < queue_family_count; ++i) { std::string parameter_name = std::string(array_parameter_name) + "[" + std::to_string(i) + "]"; if (set.count(queue_families[i])) { skip |= LogError(device, vuid, "%s: %s (=%" PRIu32 ") is not unique within %s array.", cmd_name, parameter_name.c_str(), queue_families[i], array_parameter_name); } else { set.insert(queue_families[i]); if (queue_families[i] == VK_QUEUE_FAMILY_IGNORED) { skip |= LogError( device, vuid, "%s: %s is VK_QUEUE_FAMILY_IGNORED, but it is required to provide a valid queue family index value.", cmd_name, parameter_name.c_str()); } else if (queue_families[i] >= physical_device_state->queue_family_known_count) { LogObjectList obj_list(physical_device); obj_list.add(device); skip |= LogError(obj_list, vuid, "%s: %s (= %" PRIu32 ") is not one of the queue families supported by the parent PhysicalDevice %s of this device %s.", cmd_name, parameter_name.c_str(), queue_families[i], report_data->FormatHandle(physical_device).c_str(), report_data->FormatHandle(device).c_str()); } } } } return skip; } // Check object status for selected flag state bool CoreChecks::ValidateStatus(const CMD_BUFFER_STATE *pNode, CBStatusFlags status_mask, const char *fail_msg, const char *msg_code) const { if (!(pNode->status & status_mask)) { return LogError(pNode->commandBuffer(), msg_code, "%s: %s.", report_data->FormatHandle(pNode->commandBuffer()).c_str(), fail_msg); } return false; } // Return true if for a given PSO, the given state enum is dynamic, else return false bool CoreChecks::IsDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) const { if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) { for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) { if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) return true; } } return false; } // Validate state stored as flags at time of draw call bool CoreChecks::ValidateDrawStateFlags(const CMD_BUFFER_STATE *pCB, const PIPELINE_STATE *pPipe, bool indexed, const char *msg_code) const { bool result = false; if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || 
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { result |= ValidateStatus(pCB, CBSTATUS_LINE_WIDTH_SET, "Dynamic line width state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pRasterizationState && (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_SET, "Dynamic depth bias state not set for this command buffer", msg_code); } if (pPipe->blendConstantsEnabled) { result |= ValidateStatus(pCB, CBSTATUS_BLEND_CONSTANTS_SET, "Dynamic blend constants state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_DEPTH_BOUNDS_SET, "Dynamic depth bounds state not set for this command buffer", msg_code); } if (pPipe->graphicsPipelineCI.pDepthStencilState && (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) { result |= ValidateStatus(pCB, CBSTATUS_STENCIL_READ_MASK_SET, "Dynamic stencil read mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, "Dynamic stencil write mask state not set for this command buffer", msg_code); result |= ValidateStatus(pCB, CBSTATUS_STENCIL_REFERENCE_SET, "Dynamic stencil reference state not set for this command buffer", msg_code); } if (indexed) { result |= ValidateStatus(pCB, CBSTATUS_INDEX_BUFFER_BOUND, "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code); } if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) { const auto *line_state = LvlFindInChain<VkPipelineRasterizationLineStateCreateInfoEXT>(pPipe->graphicsPipelineCI.pRasterizationState->pNext); if (line_state && line_state->stippledLineEnable) { result |= ValidateStatus(pCB, CBSTATUS_LINE_STIPPLE_SET, "Dynamic line stipple state not set for this command buffer", msg_code); } } return result; } bool CoreChecks::LogInvalidAttachmentMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *msg, const char *caller, const char *error_code) const { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s Attachment %u is not " "compatible with %u: %s.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), primary_attach, secondary_attach, msg); } bool CoreChecks::ValidateAttachmentCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach, const char *caller, const char *error_code) const { bool skip = false; const auto &primary_pass_ci = rp1_state->createInfo; const auto &secondary_pass_ci = rp2_state->createInfo; if (primary_pass_ci.attachmentCount <= primary_attach) { primary_attach = VK_ATTACHMENT_UNUSED; } if (secondary_pass_ci.attachmentCount <= secondary_attach) { secondary_attach = VK_ATTACHMENT_UNUSED; } if (primary_attach == VK_ATTACHMENT_UNUSED && secondary_attach == VK_ATTACHMENT_UNUSED) { return skip; } if 
(primary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The first is unused while the second is not.", caller, error_code); return skip; } if (secondary_attach == VK_ATTACHMENT_UNUSED) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "The second is unused while the first is not.", caller, error_code); return skip; } if (primary_pass_ci.pAttachments[primary_attach].format != secondary_pass_ci.pAttachments[secondary_attach].format) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different formats.", caller, error_code); } if (primary_pass_ci.pAttachments[primary_attach].samples != secondary_pass_ci.pAttachments[secondary_attach].samples) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different samples.", caller, error_code); } if (primary_pass_ci.pAttachments[primary_attach].flags != secondary_pass_ci.pAttachments[secondary_attach].flags) { skip |= LogInvalidAttachmentMessage(type1_string, rp1_state, type2_string, rp2_state, primary_attach, secondary_attach, "They have different flags.", caller, error_code); } return skip; } bool CoreChecks::ValidateSubpassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller, const char *error_code) const { bool skip = false; const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass]; const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass]; uint32_t max_input_attachment_count = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount); for (uint32_t i = 0; i < max_input_attachment_count; ++i) { uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.inputAttachmentCount) { primary_input_attach = primary_desc.pInputAttachments[i].attachment; } if (i < secondary_desc.inputAttachmentCount) { secondary_input_attach = secondary_desc.pInputAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, secondary_input_attach, caller, error_code); } uint32_t max_color_attachment_count = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount); for (uint32_t i = 0; i < max_color_attachment_count; ++i) { uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount) { primary_color_attach = primary_desc.pColorAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount) { secondary_color_attach = secondary_desc.pColorAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_color_attach, secondary_color_attach, caller, error_code); if (rp1_state->createInfo.subpassCount > 1) { uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED; if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) { primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment; } if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) { 
secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_resolve_attach, secondary_resolve_attach, caller, error_code); } } uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED; if (primary_desc.pDepthStencilAttachment) { primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment; } if (secondary_desc.pDepthStencilAttachment) { secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment; } skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_depthstencil_attach, secondary_depthstencil_attach, caller, error_code); // Both renderpasses must agree on Multiview usage if (primary_desc.viewMask && secondary_desc.viewMask) { if (primary_desc.viewMask != secondary_desc.viewMask) { std::stringstream ss; ss << "For subpass " << subpass << ", they have a different viewMask. The first has view mask " << primary_desc.viewMask << " while the second has view mask " << secondary_desc.viewMask << "."; skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, ss.str().c_str(), caller, error_code); } } else if (primary_desc.viewMask) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The first uses Multiview (has non-zero viewMasks) while the second one does not.", caller, error_code); } else if (secondary_desc.viewMask) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The second uses Multiview (has non-zero viewMasks) while the first one does not.", caller, error_code); } return skip; } bool CoreChecks::LogInvalidPnextMessage(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *msg, const char *caller, const char *error_code) const { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); return LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s and %s w/ %s: %s", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), msg); } // Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible. // This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and // will then feed into this function bool CoreChecks::ValidateRenderPassCompatibility(const char *type1_string, const RENDER_PASS_STATE *rp1_state, const char *type2_string, const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) const { bool skip = false; // createInfo flags must be identical for the renderpasses to be compatible. 
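// The checks below follow the "Render Pass Compatibility" rules of the Vulkan spec in order:
// identical flags, identical subpass counts, per-subpass attachment compatibility (via
// ValidateSubpassCompatibility), and matching fragment density map usage in the pNext chain.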
if (rp1_state->createInfo.flags != rp2_state->createInfo.flags) { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); skip |= LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s with flags of %u and %s w/ " "%s with flags of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), rp1_state->createInfo.flags, type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), rp2_state->createInfo.flags); } if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) { LogObjectList objlist(rp1_state->renderPass()); objlist.add(rp2_state->renderPass()); skip |= LogError(objlist, error_code, "%s: RenderPasses incompatible between %s w/ %s with a subpassCount of %u and %s w/ " "%s with a subpassCount of %u.", caller, type1_string, report_data->FormatHandle(rp1_state->renderPass()).c_str(), rp1_state->createInfo.subpassCount, type2_string, report_data->FormatHandle(rp2_state->renderPass()).c_str(), rp2_state->createInfo.subpassCount); } else { for (uint32_t i = 0; i < rp1_state->createInfo.subpassCount; ++i) { skip |= ValidateSubpassCompatibility(type1_string, rp1_state, type2_string, rp2_state, i, caller, error_code); } } // Find an entry of the Fragment Density Map type in the pNext chain, if it exists const auto fdm1 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp1_state->createInfo.pNext); const auto fdm2 = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rp2_state->createInfo.pNext); // Both renderpasses must agree on usage of a Fragment Density Map type if (fdm1 && fdm2) { uint32_t primary_input_attach = fdm1->fragmentDensityMapAttachment.attachment; uint32_t secondary_input_attach = fdm2->fragmentDensityMapAttachment.attachment; skip |= ValidateAttachmentCompatibility(type1_string, rp1_state, type2_string, rp2_state, primary_input_attach, secondary_input_attach, caller, error_code); } else if (fdm1) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The first uses a Fragment Density Map while the second one does not.", caller, error_code); } else if (fdm2) { skip |= LogInvalidPnextMessage(type1_string, rp1_state, type2_string, rp2_state, "The second uses a Fragment Density Map while the first one does not.", caller, error_code); } return skip; } // For given pipeline, return number of MSAA samples, or one if MSAA disabled static VkSampleCountFlagBits GetNumSamples(PIPELINE_STATE const *pipe) { if (pipe->graphicsPipelineCI.pMultisampleState != NULL && VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) { return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples; } return VK_SAMPLE_COUNT_1_BIT; } static void ListBits(std::ostream &s, uint32_t bits) { for (int i = 0; i < 32 && bits; i++) { if (bits & (1 << i)) { s << i; bits &= ~(1 << i); if (bits) { s << ","; } } } } std::string DynamicStateString(CBStatusFlags input_value) { std::string ret; int index = 0; while (input_value) { if (input_value & 1) { if (!ret.empty()) ret.append("|"); ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(1 << index)))); } ++index; input_value >>= 1; } if (ret.empty()) ret.append(string_VkDynamicState(ConvertToDynamicState(static_cast<CBStatusFlagBits>(0)))); return ret; } // Validate draw-time state related to the PSO bool CoreChecks::ValidatePipelineDrawtimeState(const LAST_BOUND_STATE &state, const CMD_BUFFER_STATE *pCB,
CMD_TYPE cmd_type, const PIPELINE_STATE *pPipeline, const char *caller) const { bool skip = false; const auto &current_vtx_bfr_binding_info = pCB->current_vertex_buffer_binding_info.vertex_buffer_bindings; const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); // Verify vertex & index buffers for unprotected command buffer. // Because vertex & index buffers are read-only, the protected command buffer case does not need to be checked. if (enabled_features.core11.protectedMemory == VK_TRUE) { for (const auto &buffer_binding : current_vtx_bfr_binding_info) { if (buffer_binding.buffer_state && !buffer_binding.buffer_state->Destroyed()) { skip |= ValidateProtectedBuffer(pCB, buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer, "Buffer is vertex buffer"); } } if (pCB->index_buffer_binding.buffer_state && !pCB->index_buffer_binding.buffer_state->Destroyed()) { skip |= ValidateProtectedBuffer(pCB, pCB->index_buffer_binding.buffer_state.get(), caller, vuid.unprotected_command_buffer, "Buffer is index buffer"); } } // Verify that dynamic state setting commands used by the command buffer correspond to state the bound pipeline sets up CBStatusFlags invalid_status = CBSTATUS_ALL_STATE_SET & ~(pCB->dynamic_status | pCB->static_status); if (invalid_status) { std::string dynamic_states = DynamicStateString(invalid_status); LogObjectList objlist(pCB->commandBuffer()); objlist.add(pPipeline->pipeline()); skip |= LogError(objlist, vuid.dynamic_state_setting_commands, "%s: %s doesn't set up %s, but it calls the related dynamic state setting commands", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), dynamic_states.c_str()); } // Verify vertex binding if (pPipeline->vertex_binding_descriptions_.size() > 0) { for (size_t i = 0; i < pPipeline->vertex_binding_descriptions_.size(); i++) { const auto vertex_binding = pPipeline->vertex_binding_descriptions_[i].binding; if (current_vtx_bfr_binding_info.size() < (vertex_binding + 1)) { skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding, "%s: %s expects that this Command Buffer's vertex binding Index %u should be set via " "vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", caller, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), vertex_binding, i, vertex_binding); } else if ((current_vtx_bfr_binding_info[vertex_binding].buffer_state == nullptr) && !enabled_features.robustness2_features.nullDescriptor) { skip |= LogError(pCB->commandBuffer(), vuid.vertex_binding_null, "%s: Vertex binding %d must not be VK_NULL_HANDLE. %s expects that this Command Buffer's vertex " "binding Index %u should be set via " "vkCmdBindVertexBuffers.
This is because VkVertexInputBindingDescription struct at " "index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.", caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), vertex_binding, i, vertex_binding); } } // Verify vertex attribute address alignment for (size_t i = 0; i < pPipeline->vertex_attribute_descriptions_.size(); i++) { const auto &attribute_description = pPipeline->vertex_attribute_descriptions_[i]; const auto vertex_binding = attribute_description.binding; const auto attribute_offset = attribute_description.offset; const auto &vertex_binding_map_it = pPipeline->vertex_binding_to_index_map_.find(vertex_binding); if ((vertex_binding_map_it != pPipeline->vertex_binding_to_index_map_.cend()) && (vertex_binding < current_vtx_bfr_binding_info.size()) && ((current_vtx_bfr_binding_info[vertex_binding].buffer_state) || enabled_features.robustness2_features.nullDescriptor)) { auto vertex_buffer_stride = pPipeline->vertex_binding_descriptions_[vertex_binding_map_it->second].stride; if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT)) { vertex_buffer_stride = static_cast<uint32_t>(current_vtx_bfr_binding_info[vertex_binding].stride); uint32_t attribute_binding_extent = attribute_description.offset + FormatElementSize(attribute_description.format); if (vertex_buffer_stride < attribute_binding_extent) { skip |= LogError(pCB->commandBuffer(), "VUID-vkCmdBindVertexBuffers2EXT-pStrides-03363", "The pStrides[%u] (%u) parameter in the last call to vkCmdBindVertexBuffers2EXT is less than " "the extent of the binding for attribute %zu (%u).", vertex_binding, vertex_buffer_stride, i, attribute_binding_extent); } } const auto vertex_buffer_offset = current_vtx_bfr_binding_info[vertex_binding].offset; // Use 1 as vertex/instance index to use buffer stride as well const auto attrib_address = vertex_buffer_offset + vertex_buffer_stride + attribute_offset; VkDeviceSize vtx_attrib_req_alignment = pPipeline->vertex_attribute_alignments_[i]; if (SafeModulo(attrib_address, vtx_attrib_req_alignment) != 0) { LogObjectList objlist(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer()); objlist.add(state.pipeline_state->pipeline()); skip |= LogError( objlist, vuid.vertex_binding_attribute, "%s: Invalid attribAddress alignment for vertex attribute " PRINTF_SIZE_T_SPECIFIER ", with format %s, from %s and vertex buffer %s.", caller, i, string_VkFormat(attribute_description.format), report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), report_data->FormatHandle(current_vtx_bfr_binding_info[vertex_binding].buffer_state->buffer()).c_str()); } } else { LogObjectList objlist(pCB->commandBuffer()); objlist.add(state.pipeline_state->pipeline()); skip |= LogError(objlist, vuid.vertex_binding_attribute, "%s: binding #%" PRIu32 " in pVertexAttributeDescriptions of %s is invalid in vkCmdBindVertexBuffers of %s.", caller, vertex_binding, report_data->FormatHandle(state.pipeline_state->pipeline()).c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str()); } } } // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count. // Skip check if rasterization is disabled, if there is no viewport, or if viewport/scissors are being inherited.
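// Illustrative sketch (hypothetical values): for a pipeline created with
// VK_DYNAMIC_STATE_VIEWPORT and pViewportState->viewportCount == 2, recording only
//   vkCmdSetViewport(command_buffer, /*firstViewport*/ 0, /*viewportCount*/ 1, &viewport);
// leaves bit 1 of pCB->viewportMask clear, which the mask comparison below reports as a
// missing dynamic viewport.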
bool dyn_viewport = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT); if ((!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) && pPipeline->graphicsPipelineCI.pViewportState && pCB->inheritedViewportDepths.size() == 0) { bool dyn_scissor = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR); // NB (akeley98): Current validation layers do not detect the error where vkCmdSetViewport (or scissor) was called, but // the dynamic state set is overwritten by binding a graphics pipeline with static viewport (scissor) state. // This condition can be detected by checking trashedViewportMask & viewportMask (trashedScissorMask & scissorMask) is // nonzero in the range of bits needed by the pipeline. if (dyn_viewport) { const auto required_viewports_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1; const auto missing_viewport_mask = ~pCB->viewportMask & required_viewports_mask; if (missing_viewport_mask) { std::stringstream ss; ss << caller << ": Dynamic viewport(s) "; ListBits(ss, missing_viewport_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport()."; skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str()); } } if (dyn_scissor) { const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1; const auto missing_scissor_mask = ~pCB->scissorMask & required_scissor_mask; if (missing_scissor_mask) { std::stringstream ss; ss << caller << ": Dynamic scissor(s) "; ListBits(ss, missing_scissor_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor()."; skip |= LogError(device, vuid.dynamic_state, "%s", ss.str().c_str()); } } bool dyn_viewport_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT); bool dyn_scissor_count = IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT); // VUID {refpage}-viewportCount-03417 if (dyn_viewport_count && !dyn_scissor_count) { const auto required_viewport_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1; const auto missing_viewport_mask = ~pCB->viewportWithCountMask & required_viewport_mask; if (missing_viewport_mask) { std::stringstream ss; ss << caller << ": Dynamic viewport with count "; ListBits(ss, missing_viewport_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewportWithCountEXT()."; skip |= LogError(device, vuid.viewport_count, "%s", ss.str().c_str()); } } // VUID {refpage}-scissorCount-03418 if (dyn_scissor_count && !dyn_viewport_count) { const auto required_scissor_mask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1; const auto missing_scissor_mask = ~pCB->scissorWithCountMask & required_scissor_mask; if (missing_scissor_mask) { std::stringstream ss; ss << caller << ": Dynamic scissor with count "; ListBits(ss, missing_scissor_mask); ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissorWithCountEXT()."; skip |= LogError(device, vuid.scissor_count, "%s", ss.str().c_str()); } } // VUID {refpage}-viewportCount-03419 if (dyn_scissor_count && dyn_viewport_count) { if (pCB->viewportWithCountMask != pCB->scissorWithCountMask) { std::stringstream ss; ss << caller << ": Dynamic viewport and scissor with count "; ListBits(ss, pCB->viewportWithCountMask ^ pCB->scissorWithCountMask); ss << " are used by pipeline state object, but were not provided via
matching calls to " "vkCmdSetViewportWithCountEXT and vkCmdSetScissorWithCountEXT()."; skip |= LogError(device, vuid.viewport_scissor_count, "%s", ss.str().c_str()); } } } // If inheriting viewports, verify that not using more than inherited. if (pCB->inheritedViewportDepths.size() != 0 && dyn_viewport) { uint32_t viewport_count = pPipeline->graphicsPipelineCI.pViewportState->viewportCount; uint32_t max_inherited = uint32_t(pCB->inheritedViewportDepths.size()); if (viewport_count > max_inherited) { skip |= LogError(device, vuid.dynamic_state, "Pipeline requires more viewports (%u) than inherited (viewportDepthCount=%u).", unsigned(viewport_count), unsigned(max_inherited)); } } // Verify that any MSAA request in PSO matches sample# in bound FB // Skip the check if rasterization is disabled. if (!pPipeline->graphicsPipelineCI.pRasterizationState || (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) { VkSampleCountFlagBits pso_num_samples = GetNumSamples(pPipeline); if (pCB->activeRenderPass) { const auto render_pass_info = pCB->activeRenderPass->createInfo.ptr(); const VkSubpassDescription2 *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass]; uint32_t i; unsigned subpass_num_samples = 0; for (i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples); } } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<unsigned>(render_pass_info->pAttachments[attachment].samples); } if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples) && ((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) { LogObjectList objlist(pPipeline->pipeline()); objlist.add(pCB->activeRenderPass->renderPass()); skip |= LogError(objlist, vuid.rasterization_samples, "%s: In %s the sample count is %s while the current %s has %s and they need to be the same.", caller, report_data->FormatHandle(pPipeline->pipeline()).c_str(), string_VkSampleCountFlagBits(pso_num_samples), report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(), string_VkSampleCountFlags(static_cast<VkSampleCountFlags>(subpass_num_samples)).c_str()); } } else { skip |= LogError(pPipeline->pipeline(), kVUID_Core_DrawState_NoActiveRenderpass, "%s: No active render pass found at draw-time in %s!", caller, report_data->FormatHandle(pPipeline->pipeline()).c_str()); } } // Verify that PSO creation renderPass is compatible with active renderPass if (pCB->activeRenderPass) { // TODO: AMD extension codes are included here, but actual function entrypoints are not yet intercepted if (pCB->activeRenderPass->renderPass() != pPipeline->rp_state->renderPass()) { // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with skip |= ValidateRenderPassCompatibility("active render pass", pCB->activeRenderPass.get(), "pipeline state object", pPipeline->rp_state.get(), caller, vuid.render_pass_compatible); } if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) { skip |= LogError(pPipeline->pipeline(), vuid.subpass_index, "%s: Pipeline was built for subpass %u but used in subpass %u.", caller, 
pPipeline->graphicsPipelineCI.subpass, pCB->activeSubpass); } // Check if depth stencil attachment was created with sample location compatible bit if (pPipeline->sample_location_enabled == VK_TRUE) { const safe_VkAttachmentReference2 *ds_attachment = pCB->activeRenderPass->createInfo.pSubpasses[pCB->activeSubpass].pDepthStencilAttachment; const FRAMEBUFFER_STATE *fb_state = pCB->activeFramebuffer.get(); if ((ds_attachment != nullptr) && (fb_state != nullptr)) { const uint32_t attachment = ds_attachment->attachment; if (attachment != VK_ATTACHMENT_UNUSED) { const auto *imageview_state = pCB->GetActiveAttachmentImageViewState(attachment); if (imageview_state != nullptr) { const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image); if (image_state != nullptr) { if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT) == 0) { skip |= LogError(pPipeline->pipeline(), vuid.sample_location, "%s: sampleLocationsEnable is true for the pipeline, but the subpass (%u) depth " "stencil attachment's VkImage was not created with " "VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT.", caller, pCB->activeSubpass); } } } } } } } skip |= ValidateStatus(pCB, CBSTATUS_PATCH_CONTROL_POINTS_SET, "Dynamic patch control points not set for this command buffer", vuid.patch_control_points); skip |= ValidateStatus(pCB, CBSTATUS_RASTERIZER_DISCARD_ENABLE_SET, "Dynamic rasterizer discard enable not set for this command buffer", vuid.rasterizer_discard_enable); skip |= ValidateStatus(pCB, CBSTATUS_DEPTH_BIAS_ENABLE_SET, "Dynamic depth bias enable not set for this command buffer", vuid.depth_bias_enable); skip |= ValidateStatus(pCB, CBSTATUS_LOGIC_OP_SET, "Dynamic state logicOp not set for this command buffer", vuid.logic_op); skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_RESTART_ENABLE_SET, "Dynamic primitive restart enable not set for this command buffer", vuid.primitive_restart_enable); skip |= ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_BINDING_STRIDE_SET, "Dynamic vertex input binding stride not set for this command buffer", vuid.vertex_input_binding_stride); skip |= ValidateStatus(pCB, CBSTATUS_VERTEX_INPUT_SET, "Dynamic vertex input not set for this command buffer", vuid.vertex_input); // VUID {refpage}-primitiveTopology-03420 skip |= ValidateStatus(pCB, CBSTATUS_PRIMITIVE_TOPOLOGY_SET, "Dynamic primitive topology state not set for this command buffer", vuid.primitive_topology); if (IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT)) { bool compatible_topology = false; switch (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology) { case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_POINT_LIST: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_LINE_LIST: case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP: case 
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY: case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY: compatible_topology = true; break; default: break; } break; case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST: switch (pCB->primitiveTopology) { case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST: compatible_topology = true; break; default: break; } break; default: break; } if (!compatible_topology) { skip |= LogError(pPipeline->pipeline(), vuid.primitive_topology, "%s: the last primitive topology %s state set by vkCmdSetPrimitiveTopologyEXT is " "not compatible with the pipeline topology %s.", caller, string_VkPrimitiveTopology(pCB->primitiveTopology), string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } } if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) { skip |= ValidateGraphicsPipelineShaderDynamicState(pPipeline, pCB, caller, vuid); } return skip; } // For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to // pipelineLayout[layoutIndex] static bool VerifySetLayoutCompatibility(const debug_report_data *report_data, const cvdescriptorset::DescriptorSet *descriptor_set, PIPELINE_LAYOUT_STATE const *pipeline_layout, const uint32_t layoutIndex, string &errorMsg) { auto num_sets = pipeline_layout->set_layouts.size(); if (layoutIndex >= num_sets) { stringstream error_str; error_str << report_data->FormatHandle(pipeline_layout->layout()) << " only contains " << num_sets << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index " << layoutIndex; errorMsg = error_str.str(); return false; } if (descriptor_set->IsPushDescriptor()) return true; auto layout_node = pipeline_layout->set_layouts[layoutIndex].get(); return cvdescriptorset::VerifySetLayoutCompatibility(report_data, layout_node, descriptor_set->GetLayout().get(), &errorMsg); } // Validate overall state at the time of a draw call bool CoreChecks::ValidateCmdBufDrawState(const CMD_BUFFER_STATE *cb_node, CMD_TYPE cmd_type, const bool indexed, const VkPipelineBindPoint bind_point, const char *function) const { const DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const auto lv_bind_point = ConvertToLvlBindPoint(bind_point); const auto &state = cb_node->lastBound[lv_bind_point]; const auto *pipe = state.pipeline_state; if (nullptr == pipe) { return LogError(cb_node->commandBuffer(), vuid.pipeline_bound, "Must not call %s on this command buffer while there is no %s pipeline bound.", function, bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR ? "RayTracing" : bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS ? "Graphics" : "Compute"); } bool result = false; if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) { // First check flag states result |= ValidateDrawStateFlags(cb_node, pipe, indexed, vuid.dynamic_state); if (cb_node->activeRenderPass && cb_node->activeFramebuffer) { // Verify attachments for unprotected/protected command buffer. if (enabled_features.core11.protectedMemory == VK_TRUE && cb_node->active_attachments) { uint32_t i = 0; for (const auto &view_state : *cb_node->active_attachments.get()) { const auto &subpass = cb_node->active_subpasses->at(i); if (subpass.used && view_state && !view_state->Destroyed()) { std::string image_desc = "Image is "; image_desc.append(string_VkImageUsageFlagBits(subpass.usage)); // Because input attachments are read-only, the protected command buffer case does not need to be checked for them.
// Some CMD_TYPEs cannot be protected. See VUID 02711. if (subpass.usage != VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT && vuid.protected_command_buffer != kVUIDUndefined) { result |= ValidateUnprotectedImage(cb_node, view_state->image_state.get(), function, vuid.protected_command_buffer, image_desc.c_str()); } result |= ValidateProtectedImage(cb_node, view_state->image_state.get(), function, vuid.unprotected_command_buffer, image_desc.c_str()); } ++i; } } } } // Now complete other state checks string error_string; auto const &pipeline_layout = pipe->pipeline_layout.get(); // Check if the current pipeline is compatible for the maximum used set with the bound sets. if (pipe->active_slots.size() > 0 && !CompatForSet(pipe->max_active_slot, state, pipeline_layout->compat_for_set)) { LogObjectList objlist(pipe->pipeline()); objlist.add(pipeline_layout->layout()); objlist.add(state.pipeline_layout); result |= LogError(objlist, vuid.compatible_pipeline, "%s(): %s defined with %s is not compatible for maximum set statically used %" PRIu32 " with bound descriptor sets, last bound with %s", CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline()).c_str(), report_data->FormatHandle(pipeline_layout->layout()).c_str(), pipe->max_active_slot, report_data->FormatHandle(state.pipeline_layout).c_str()); } for (const auto &set_binding_pair : pipe->active_slots) { uint32_t set_index = set_binding_pair.first; // If valid set is not bound throw an error if ((state.per_set.size() <= set_index) || (!state.per_set[set_index].bound_descriptor_set)) { result |= LogError(cb_node->commandBuffer(), kVUID_Core_DrawState_DescriptorSetNotBound, "%s(): %s uses set #%u but that set is not bound.", CommandTypeString(cmd_type), report_data->FormatHandle(pipe->pipeline()).c_str(), set_index); } else if (!VerifySetLayoutCompatibility(report_data, state.per_set[set_index].bound_descriptor_set, pipeline_layout, set_index, error_string)) { // Set is bound but not compatible w/ overlapping pipeline_layout from PSO VkDescriptorSet set_handle = state.per_set[set_index].bound_descriptor_set->GetSet(); LogObjectList objlist(set_handle); objlist.add(pipeline_layout->layout()); result |= LogError(objlist, kVUID_Core_DrawState_PipelineLayoutsIncompatible, "%s(): %s bound as set #%u is not compatible with overlapping %s due to: %s", CommandTypeString(cmd_type), report_data->FormatHandle(set_handle).c_str(), set_index, report_data->FormatHandle(pipeline_layout->layout()).c_str(), error_string.c_str()); } else { // Valid set is bound and layout compatible, validate that it's updated // Pull the set node const cvdescriptorset::DescriptorSet *descriptor_set = state.per_set[set_index].bound_descriptor_set; // Validate the draw-time state for this descriptor set std::string err_str; // For the "bindless" style resource usage with many descriptors, need to optimize command <-> descriptor // binding validation. Take the requested binding set and prefilter it to eliminate redundant validation checks. // Here, the currently bound pipeline determines whether an image validation check is redundant: // for images, the "req" portion of the binding_req is indirectly (but tightly) coupled to the pipeline. cvdescriptorset::PrefilterBindRequestMap reduced_map(*descriptor_set, set_binding_pair.second); const auto &binding_req_map = reduced_map.FilteredMap(*cb_node, *pipe); // We can skip validating the descriptor set if "nothing" has changed since the last validation.
// Same set, no image layout changes, and same "pipeline state" (binding_req_map). If there are // any dynamic descriptors, always revalidate rather than caching the values. We currently only // apply this optimization if IsManyDescriptors is true, to avoid the overhead of copying the // binding_req_map which could potentially be expensive. bool descriptor_set_changed = !reduced_map.IsManyDescriptors() || // Revalidate each time if the set has dynamic offsets state.per_set[set_index].dynamicOffsets.size() > 0 || // Revalidate if descriptor set (or contents) has changed state.per_set[set_index].validated_set != descriptor_set || state.per_set[set_index].validated_set_change_count != descriptor_set->GetChangeCount() || (!disabled[image_layout_validation] && state.per_set[set_index].validated_set_image_layout_change_count != cb_node->image_layout_change_count); bool need_validate = descriptor_set_changed || // Revalidate if previous bindingReqMap doesn't include new bindingReqMap !std::includes(state.per_set[set_index].validated_set_binding_req_map.begin(), state.per_set[set_index].validated_set_binding_req_map.end(), binding_req_map.begin(), binding_req_map.end()); if (need_validate) { if (!descriptor_set_changed && reduced_map.IsManyDescriptors()) { // Only validate the bindings that haven't already been validated BindingReqMap delta_reqs; std::set_difference(binding_req_map.begin(), binding_req_map.end(), state.per_set[set_index].validated_set_binding_req_map.begin(), state.per_set[set_index].validated_set_binding_req_map.end(), layer_data::insert_iterator<BindingReqMap>(delta_reqs, delta_reqs.begin())); result |= ValidateDrawState(descriptor_set, delta_reqs, state.per_set[set_index].dynamicOffsets, cb_node, cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid); } else { result |= ValidateDrawState(descriptor_set, binding_req_map, state.per_set[set_index].dynamicOffsets, cb_node, cb_node->active_attachments.get(), cb_node->active_subpasses.get(), function, vuid); } } } } // Check general pipeline state that needs to be validated at drawtime if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) { result |= ValidatePipelineDrawtimeState(state, cb_node, cmd_type, pipe, function); } // Verify if push constants have been set // NOTE: Currently not checking whether active push constants are compatible with the active pipeline, nor whether the // "life times" of push constants are correct. // Discussion on validity of these checks can be found at https://gitlab.khronos.org/vulkan/vulkan/-/issues/2602. 
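// The loop below only flags a stage that statically uses push constants when no
// vkCmdPushConstants() call was recorded at all; as a hypothetical example, an application
// would avoid the error with something like:
//   vkCmdPushConstants(command_buffer, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, size, values);
// recorded before the draw. Range/lifetime compatibility is intentionally not checked here
// (see the discussion link above).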
if (!cb_node->push_constant_data_ranges || (pipeline_layout->push_constant_ranges == cb_node->push_constant_data_ranges)) { for (const auto &stage : pipe->stage_state) { const auto *entrypoint = stage.shader_state.get()->FindEntrypointStruct(stage.entry_point_name.c_str(), stage.stage_flag); if (!entrypoint || !entrypoint->push_constant_used_in_shader.IsUsed()) { continue; } // Edge case where if the shader is using push constants statically and there never was a vkCmdPushConstants if (!cb_node->push_constant_data_ranges) { LogObjectList objlist(cb_node->commandBuffer()); objlist.add(pipeline_layout->layout()); objlist.add(pipe->pipeline()); result |= LogError(objlist, vuid.push_constants_set, "%s(): Shader in %s uses push-constant statically but vkCmdPushConstants was not called yet for " "pipeline layout %s.", CommandTypeString(cmd_type), string_VkShaderStageFlags(stage.stage_flag).c_str(), report_data->FormatHandle(pipeline_layout->layout()).c_str()); } const auto it = cb_node->push_constant_data_update.find(stage.stage_flag); if (it == cb_node->push_constant_data_update.end()) { // This error has been printed in ValidatePushConstantUsage. break; } } } return result; } bool CoreChecks::ValidatePipelineLocked(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const { bool skip = false; const PIPELINE_STATE *pipeline = pPipelines[pipelineIndex].get(); // If create derivative bit is set, check that we've specified a base // pipeline correctly, and that the base pipeline was created to allow // derivatives. if (pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { const PIPELINE_STATE *base_pipeline = nullptr; if (!((pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^ (pipeline->graphicsPipelineCI.basePipelineIndex != -1))) { // TODO: This check is a superset of VUID-VkGraphicsPipelineCreateInfo-flags-00724 and // TODO: VUID-VkGraphicsPipelineCreateInfo-flags-00725 skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo[%d]: exactly one of base pipeline index and handle must be specified", pipelineIndex); } else if (pipeline->graphicsPipelineCI.basePipelineIndex != -1) { if (pipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) { skip |= LogError(device, "VUID-vkCreateGraphicsPipelines-flags-00720", "Invalid Pipeline CreateInfo[%d]: base pipeline must occur earlier in array than derivative pipeline.", pipelineIndex); } else { base_pipeline = pPipelines[pipeline->graphicsPipelineCI.basePipelineIndex].get(); } } else if (pipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = GetPipelineState(pipeline->graphicsPipelineCI.basePipelineHandle); } if (base_pipeline && !(base_pipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError(device, kVUID_Core_DrawState_InvalidPipelineCreateState, "Invalid Pipeline CreateInfo[%d]: base pipeline does not allow derivatives.", pipelineIndex); } } // Check for portability errors if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) { if ((VK_FALSE == enabled_features.portability_subset_features.triangleFans) && (VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN == pipeline->topology_at_rasterizer)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-triangleFans-04452", "Invalid Pipeline CreateInfo[%d] (portability error): VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN is not supported", pipelineIndex); } // Validate vertex inputs for (const auto &desc 
: pipeline->vertex_binding_descriptions_) { if ((desc.stride < phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) || ((desc.stride % phys_dev_ext_props.portability_props.minVertexInputBindingStrideAlignment) != 0)) { skip |= LogError( device, "VUID-VkVertexInputBindingDescription-stride-04456", "Invalid Pipeline CreateInfo[%d] (portability error): Vertex input stride must be at least as large as and a " "multiple of VkPhysicalDevicePortabilitySubsetPropertiesKHR::minVertexInputBindingStrideAlignment.", pipelineIndex); } } // Validate vertex attributes if (VK_FALSE == enabled_features.portability_subset_features.vertexAttributeAccessBeyondStride) { for (const auto &attrib : pipeline->vertex_attribute_descriptions_) { const auto vertex_binding_map_it = pipeline->vertex_binding_to_index_map_.find(attrib.binding); if (vertex_binding_map_it != pipeline->vertex_binding_to_index_map_.cend()) { const auto& desc = pipeline->vertex_binding_descriptions_[vertex_binding_map_it->second]; if ((attrib.offset + FormatElementSize(attrib.format)) > desc.stride) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-vertexAttributeAccessBeyondStride-04457", "Invalid Pipeline CreateInfo[%d] (portability error): (attribute.offset + " "sizeof(vertex_description.format)) is larger than the vertex stride", pipelineIndex); } } } } // Validate polygon mode auto raster_state_ci = pipeline->graphicsPipelineCI.pRasterizationState; if ((VK_FALSE == enabled_features.portability_subset_features.pointPolygons) && raster_state_ci && (VK_FALSE == raster_state_ci->rasterizerDiscardEnable) && (VK_POLYGON_MODE_POINT == raster_state_ci->polygonMode)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-pointPolygons-04458", "Invalid Pipeline CreateInfo[%d] (portability error): point polygons are not supported", pipelineIndex); } } return skip; } // UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function. bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const { bool skip = false; // Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState // produces nonsense errors that confuse users. Other layers should already // emit errors for renderpass being invalid. 
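// Note: subpass_desc is taken from pSubpasses before the range check below and is reset to
// nullptr when the subpass index is out of range, so later blocks can simply test
// subpass_desc against nullptr instead of re-validating the index.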
auto subpass_desc = &pPipeline->rp_state->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass]; if (pPipeline->graphicsPipelineCI.subpass >= pPipeline->rp_state->createInfo.subpassCount) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00759", "Invalid Pipeline CreateInfo[%u] State: Subpass index %u is out of range for this renderpass (0..%u).", pipelineIndex, pPipeline->graphicsPipelineCI.subpass, pPipeline->rp_state->createInfo.subpassCount - 1); subpass_desc = nullptr; } if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) { const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState; if (subpass_desc && color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746", "vkCreateGraphicsPipelines() pCreateInfo[%u]: %s subpass %u has colorAttachmentCount of %u which doesn't " "match the pColorBlendState->attachmentCount of %u.", pipelineIndex, report_data->FormatHandle(pPipeline->rp_state->renderPass()).c_str(), pPipeline->graphicsPipelineCI.subpass, subpass_desc->colorAttachmentCount, color_blend_state->attachmentCount); } if (!enabled_features.core.independentBlend) { if (pPipeline->attachments.size() > 1) { const VkPipelineColorBlendAttachmentState *const attachments = &pPipeline->attachments[0]; for (size_t i = 1; i < pPipeline->attachments.size(); i++) { // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains // only attachment state, so memcmp is best suited for the comparison if (memcmp(static_cast<const void *>(attachments), static_cast<const void *>(&attachments[i]), sizeof(attachments[0]))) { skip |= LogError(device, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605", "Invalid Pipeline CreateInfo[%u]: If independent blend feature not enabled, all elements of " "pAttachments must be identical.", pipelineIndex); break; } } } } if (!enabled_features.core.logicOp && (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) { skip |= LogError( device, "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606", "Invalid Pipeline CreateInfo[%u]: If logic operations feature not enabled, logicOpEnable must be VK_FALSE.", pipelineIndex); } for (size_t i = 0; i < pPipeline->attachments.size(); i++) { if ((pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcColorBlendFactor); } } if ((pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || 
(pPipeline->attachments[i].dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstColorBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstColorBlendFactor); } } if ((pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].srcAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].srcAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].srcAlphaBlendFactor); } } if ((pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_SRC1_ALPHA) || (pPipeline->attachments[i].dstAlphaBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA)) { if (!enabled_features.core.dualSrcBlend) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611", "vkCreateGraphicsPipelines(): pPipelines[%d].pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].dstAlphaBlendFactor uses a dual-source blend factor (%d), but this device feature is not " "enabled.", pipelineIndex, i, pPipeline->attachments[i].dstAlphaBlendFactor); } } } } if (ValidateGraphicsPipelineShaderState(pPipeline)) { skip = true; } // Each shader's stage must be unique if (pPipeline->duplicate_shaders) { for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) { if (pPipeline->duplicate_shaders & stage) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00726", "Invalid Pipeline CreateInfo[%u] State: Multiple shaders provided for stage %s", pipelineIndex, string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage))); } } } if (!enabled_features.core.geometryShader && (pPipeline->active_shaders & VK_SHADER_STAGE_GEOMETRY_BIT)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00704", "Invalid Pipeline CreateInfo[%u] State: Geometry Shader not supported.", pipelineIndex); } if (!enabled_features.core.tessellationShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT || pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-00705", "Invalid Pipeline CreateInfo[%u] State: Tessellation Shader not supported.", pipelineIndex); } if (device_extensions.vk_nv_mesh_shader) { // VS or mesh is required if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-02096", "Invalid Pipeline CreateInfo[%u] State: Vertex Shader or Mesh Shader required.", pipelineIndex); } // 
Can't mix mesh and VTG if ((pPipeline->active_shaders & (VK_SHADER_STAGE_MESH_BIT_NV | VK_SHADER_STAGE_TASK_BIT_NV)) && (pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_GEOMETRY_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02095", "Invalid Pipeline CreateInfo[%u] State: Geometric shader stages must either be all mesh (mesh | task) " "or all VTG (vertex, tess control, tess eval, geom).", pipelineIndex); } } else { // VS is required if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00727", "Invalid Pipeline CreateInfo[%u] State: Vertex Shader required.", pipelineIndex); } } if (!enabled_features.mesh_shader.meshShader && (pPipeline->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02091", "Invalid Pipeline CreateInfo[%u] State: Mesh Shader not supported.", pipelineIndex); } if (!enabled_features.mesh_shader.taskShader && (pPipeline->active_shaders & VK_SHADER_STAGE_TASK_BIT_NV)) { skip |= LogError(device, "VUID-VkPipelineShaderStageCreateInfo-stage-02092", "Invalid Pipeline CreateInfo[%u] State: Task Shader not supported.", pipelineIndex); } // Either both or neither TC/TE shaders should be defined bool has_control = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; bool has_eval = (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) != 0; if (has_control && !has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729", "Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } if (!has_control && has_eval) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730", "Invalid Pipeline CreateInfo[%u] State: TE and TC shaders must be included or excluded as a pair.", pipelineIndex); } // Compute shaders should be specified independent of Gfx shaders if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-stage-00728", "Invalid Pipeline CreateInfo[%u] State: Do not specify Compute Shader for Gfx Pipeline.", pipelineIndex); } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pInputAssemblyState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02098", "Invalid Pipeline CreateInfo[%u] State: Missing pInputAssemblyState.", pipelineIndex); } // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines. // Mismatching primitive topology and tessellation fails graphics pipeline creation. 
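// Note on the next two checks: together they make PATCH_LIST and tessellation mutually required.
// A pipeline with both tessellation stages must use VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, and
// PATCH_LIST is only accepted when both tessellation stages are present.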
if (has_control && has_eval && (!pPipeline->graphicsPipelineCI.pInputAssemblyState || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736", "Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA topology for " "tessellation pipelines.", pipelineIndex); } if (pPipeline->graphicsPipelineCI.pInputAssemblyState) { if (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) { if (!has_control || !has_eval) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-topology-00737", "Invalid Pipeline CreateInfo[%u] State: VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid " "for tessellation pipelines.", pipelineIndex); } } if ((pPipeline->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_POINT_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError( device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and primitiveRestartEnable is VK_TRUE. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.geometryShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY || pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and geometry shaders feature is not enabled. " "It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } if ((enabled_features.core.tessellationShader == VK_FALSE) && (pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) { skip |= LogError(device, "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430", "vkCreateGraphicsPipelines() pCreateInfo[%u]: topology is %s and tessellation shaders feature is not " "enabled. It is invalid.", pipelineIndex, string_VkPrimitiveTopology(pPipeline->graphicsPipelineCI.pInputAssemblyState->topology)); } } // If a rasterization state is provided... 
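// Everything in the block below is gated on pRasterizationState being present: first the feature
// checks for depth clamp and depth bias clamp, then (only when rasterizerDiscardEnable is VK_FALSE)
// the multisample, depth/stencil, and color blend requirements that depend on the fragment stages
// actually running, and finally the provoking-vertex mode check.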
if (pPipeline->graphicsPipelineCI.pRasterizationState) { if ((pPipeline->graphicsPipelineCI.pRasterizationState->depthClampEnable == VK_TRUE) && (!enabled_features.core.depthClamp)) { skip |= LogError(device, "VUID-VkPipelineRasterizationStateCreateInfo-depthClampEnable-00782", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthClamp device feature is disabled: the " "depthClampEnable member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } if (!IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS) && (pPipeline->graphicsPipelineCI.pRasterizationState->depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00754", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBiasClamp device feature is disabled: the " "depthBiasClamp member " "of the VkPipelineRasterizationStateCreateInfo structure must be set to 0.0 unless the " "VK_DYNAMIC_STATE_DEPTH_BIAS dynamic state is enabled.", pipelineIndex); } // If rasterization is enabled... if (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) { if ((pPipeline->graphicsPipelineCI.pMultisampleState->alphaToOneEnable == VK_TRUE) && (!enabled_features.core.alphaToOne)) { skip |= LogError( device, "VUID-VkPipelineMultisampleStateCreateInfo-alphaToOneEnable-00785", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the alphaToOne device feature is disabled: the alphaToOneEnable " "member of the VkPipelineMultisampleStateCreateInfo structure must be set to VK_FALSE.", pipelineIndex); } // If subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a valid structure if (subpass_desc && subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (!pPipeline->graphicsPipelineCI.pDepthStencilState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00752", "Invalid Pipeline CreateInfo[%u] State: pDepthStencilState is NULL when rasterization is enabled " "and subpass uses a depth/stencil attachment.", pipelineIndex); } else if (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) { if (!enabled_features.core.depthBounds) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-depthBoundsTestEnable-00598", "vkCreateGraphicsPipelines() pCreateInfo[%u]: the depthBounds device feature is disabled: the " "depthBoundsTestEnable member of the VkPipelineDepthStencilStateCreateInfo structure must be " "set to VK_FALSE.", pipelineIndex); } // The extension was not created with a feature bit, which prevents displaying the 2 variations of the VUIDs if (!device_extensions.vk_ext_depth_range_unrestricted && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS)) { const float minDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->minDepthBounds; const float maxDepthBounds = pPipeline->graphicsPipelineCI.pDepthStencilState->maxDepthBounds; // Also VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00755 if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510", "vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension " "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is " "true, and pDepthStencilState::minDepthBounds (=%f) is not within the [0.0, 1.0] range.", pipelineIndex,
minDepthBounds); } if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510", "vkCreateGraphicsPipelines() pCreateInfo[%u]: VK_EXT_depth_range_unrestricted extension " "is not enabled, VK_DYNAMIC_STATE_DEPTH_BOUNDS is not used, depthBoundsTestEnable is " "true, and pDepthStencilState::maxDepthBounds (=%f) is not within the [0.0, 1.0] range.", pipelineIndex, maxDepthBounds); } } } } // If subpass uses color attachments, pColorBlendState must be valid pointer if (subpass_desc) { uint32_t color_attachment_count = 0; for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { ++color_attachment_count; } } if (color_attachment_count > 0 && pPipeline->graphicsPipelineCI.pColorBlendState == nullptr) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753", "Invalid Pipeline CreateInfo[%u] State: pColorBlendState is NULL when rasterization is enabled and " "subpass uses color attachments.", pipelineIndex); } } } auto provoking_vertex_state_ci = lvl_find_in_chain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>( pPipeline->graphicsPipelineCI.pRasterizationState->pNext); if (provoking_vertex_state_ci && provoking_vertex_state_ci->provokingVertexMode == VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT && !enabled_features.provoking_vertex_features.provokingVertexLast) { skip |= LogError( device, "VUID-VkPipelineRasterizationProvokingVertexStateCreateInfoEXT-provokingVertexMode-04883", "provokingVertexLast feature is not enabled."); } } if ((pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT) && !pPipeline->graphicsPipelineCI.pVertexInputState) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pStages-02097", "Invalid Pipeline CreateInfo[%u] State: Missing pVertexInputState.", pipelineIndex); } auto vi = pPipeline->graphicsPipelineCI.pVertexInputState; if (vi != NULL) { for (uint32_t j = 0; j < vi->vertexAttributeDescriptionCount; j++) { VkFormat format = vi->pVertexAttributeDescriptions[j].format; // Internal call to get format info. Still goes through layers, could potentially go directly to ICD. 
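// A format can only be used for a vertex attribute if the implementation reports
// VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT in VkFormatProperties::bufferFeatures for that format.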
VkFormatProperties properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &properties); if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) { skip |= LogError(device, "VUID-VkVertexInputAttributeDescription-format-00623", "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format " "(%s) is not a supported vertex buffer format.", pipelineIndex, j, string_VkFormat(format)); } } } if (subpass_desc && pPipeline->graphicsPipelineCI.pMultisampleState) { const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pPipeline->graphicsPipelineCI.pMultisampleState; auto accum_color_samples = [subpass_desc, pPipeline](uint32_t &samples) { for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } }; if (!(device_extensions.vk_amd_mixed_attachment_samples || device_extensions.vk_nv_framebuffer_mixed_samples)) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_num_samples = 0; accum_color_samples(subpass_num_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_num_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } // subpass_num_samples is 0 when the subpass has no attachments or if all attachments are VK_ATTACHMENT_UNUSED. // Only validate the value of subpass_num_samples if the subpass has attachments that are not VK_ATTACHMENT_UNUSED. 
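// Since each VkSampleCountFlagBits value is a single bit, OR-ing the per-attachment sample counts
// gives a power of two only when all used attachments share one count. For example,
// VK_SAMPLE_COUNT_4_BIT | VK_SAMPLE_COUNT_4_BIT == 0x4 (consistent), while
// VK_SAMPLE_COUNT_2_BIT | VK_SAMPLE_COUNT_4_BIT == 0x6, which fails the IsPowerOfTwo() check.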
if (subpass_num_samples && (!IsPowerOfTwo(subpass_num_samples) || (subpass_num_samples != raster_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass color and/or depth attachment.", pipelineIndex, raster_samples); } } if (device_extensions.vk_amd_mixed_attachment_samples) { VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0); for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) { if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pColorAttachments[i].attachment].samples); } } if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { max_sample_count = std::max( max_sample_count, pPipeline->rp_state->createInfo.pAttachments[subpass_desc->pDepthStencilAttachment->attachment].samples); } if ((pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE) && (max_sample_count != static_cast<VkSampleCountFlagBits>(0)) && (multisample_state->rasterizationSamples != max_sample_count)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%s) != max " "attachment samples (%s) used in subpass %u.", pipelineIndex, string_VkSampleCountFlagBits(multisample_state->rasterizationSamples), string_VkSampleCountFlagBits(max_sample_count), pPipeline->graphicsPipelineCI.subpass); } } if (device_extensions.vk_nv_framebuffer_mixed_samples) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; const uint32_t subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); if (pPipeline->graphicsPipelineCI.pDepthStencilState) { const bool ds_test_enabled = (pPipeline->graphicsPipelineCI.pDepthStencilState->depthTestEnable == VK_TRUE) || (pPipeline->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE) || (pPipeline->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE); if (ds_test_enabled && (!IsPowerOfTwo(subpass_depth_samples) || (raster_samples != subpass_depth_samples))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "does not match the number of samples of the RenderPass depth attachment (%u).", pipelineIndex, raster_samples, subpass_depth_samples); } } } if (IsPowerOfTwo(subpass_color_samples)) { if (raster_samples < subpass_color_samples) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) " "is not greater than or equal to the number of samples of the RenderPass color attachment (%u).", pipelineIndex, raster_samples, subpass_color_samples); } if (multisample_state) { if ((raster_samples > subpass_color_samples) && (multisample_state->sampleShadingEnable ==
VK_TRUE)) { skip |= LogError(device, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415", "vkCreateGraphicsPipelines: pCreateInfo[%d].pMultisampleState->sampleShadingEnable must be " "VK_FALSE when " "pCreateInfo[%d].pMultisampleState->rasterizationSamples (%u) is greater than the number of " "samples of the " "subpass color attachment (%u).", pipelineIndex, pipelineIndex, raster_samples, subpass_color_samples); } const auto *coverage_modulation_state = LvlFindInChain<VkPipelineCoverageModulationStateCreateInfoNV>(multisample_state->pNext); if (coverage_modulation_state && (coverage_modulation_state->coverageModulationTableEnable == VK_TRUE)) { if (coverage_modulation_state->coverageModulationTableCount != (raster_samples / subpass_color_samples)) { skip |= LogError( device, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405", "vkCreateGraphicsPipelines: pCreateInfos[%d] VkPipelineCoverageModulationStateCreateInfoNV " "coverageModulationTableCount of %u is invalid.", pipelineIndex, coverage_modulation_state->coverageModulationTableCount); } } } } if (device_extensions.vk_nv_coverage_reduction_mode) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_color_samples = 0; uint32_t subpass_depth_samples = 0; accum_color_samples(subpass_color_samples); if (subpass_desc->pDepthStencilAttachment && subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const auto attachment = subpass_desc->pDepthStencilAttachment->attachment; subpass_depth_samples = static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } if (multisample_state && IsPowerOfTwo(subpass_color_samples) && (subpass_depth_samples == 0 || IsPowerOfTwo(subpass_depth_samples))) { const auto *coverage_reduction_state = LvlFindInChain<VkPipelineCoverageReductionStateCreateInfoNV>(multisample_state->pNext); if (coverage_reduction_state) { const VkCoverageReductionModeNV coverage_reduction_mode = coverage_reduction_state->coverageReductionMode; uint32_t combination_count = 0; std::vector<VkFramebufferMixedSamplesCombinationNV> combinations; DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, nullptr); combinations.resize(combination_count); DispatchGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(physical_device, &combination_count, &combinations[0]); bool combination_found = false; for (const auto &combination : combinations) { if (coverage_reduction_mode == combination.coverageReductionMode && raster_samples == combination.rasterizationSamples && subpass_depth_samples == combination.depthStencilSamples && subpass_color_samples == combination.colorSamples) { combination_found = true; break; } } if (!combination_found) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722", "vkCreateGraphicsPipelines: pCreateInfos[%d] the specified combination of coverage " "reduction mode (%s), pMultisampleState->rasterizationSamples (%u), sample counts for " "the subpass color and depth/stencil attachments is not a valid combination returned by " "vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV.", pipelineIndex, string_VkCoverageReductionModeNV(coverage_reduction_mode), raster_samples); } } } } if (device_extensions.vk_nv_fragment_coverage_to_color) { const auto coverage_to_color_state = LvlFindInChain<VkPipelineCoverageToColorStateCreateInfoNV>(multisample_state->pNext); if
(coverage_to_color_state && coverage_to_color_state->coverageToColorEnable == VK_TRUE) { bool attachment_is_valid = false; std::string error_detail; if (coverage_to_color_state->coverageToColorLocation < subpass_desc->colorAttachmentCount) { const auto& color_attachment_ref = subpass_desc->pColorAttachments[coverage_to_color_state->coverageToColorLocation]; if (color_attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { const auto& color_attachment = pPipeline->rp_state->createInfo.pAttachments[color_attachment_ref.attachment]; switch (color_attachment.format) { case VK_FORMAT_R8_UINT: case VK_FORMAT_R8_SINT: case VK_FORMAT_R16_UINT: case VK_FORMAT_R16_SINT: case VK_FORMAT_R32_UINT: case VK_FORMAT_R32_SINT: attachment_is_valid = true; break; default: std::ostringstream str; str << "references an attachment with an invalid format (" << string_VkFormat(color_attachment.format) << ")."; error_detail = str.str(); break; } } else { std::ostringstream str; str << "references an invalid attachment. The subpass pColorAttachments[" << coverage_to_color_state->coverageToColorLocation << "].attachment has the value VK_ATTACHMENT_UNUSED."; error_detail = str.str(); } } else { std::ostringstream str; str << "references a non-existent attachment since the subpass colorAttachmentCount is " << subpass_desc->colorAttachmentCount << "."; error_detail = str.str(); } if (!attachment_is_valid) { skip |= LogError(device, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404", "vkCreateGraphicsPipelines: pCreateInfos[%" PRId32 "].pMultisampleState VkPipelineCoverageToColorStateCreateInfoNV " "coverageToColorLocation = %" PRIu32 " %s", pipelineIndex, coverage_to_color_state->coverageToColorLocation, error_detail.c_str()); } } } if (device_extensions.vk_ext_sample_locations) { const VkPipelineSampleLocationsStateCreateInfoEXT *sample_location_state = LvlFindInChain<VkPipelineSampleLocationsStateCreateInfoEXT>(multisample_state->pNext); if (sample_location_state != nullptr) { if ((sample_location_state->sampleLocationsEnable == VK_TRUE) && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT) == false)) { const VkSampleLocationsInfoEXT sample_location_info = sample_location_state->sampleLocationsInfo; skip |= ValidateSampleLocationsInfo(&sample_location_info, "vkCreateGraphicsPipelines"); const VkExtent2D grid_size = sample_location_info.sampleLocationGridSize; auto multisample_prop = LvlInitStruct<VkMultisamplePropertiesEXT>(); DispatchGetPhysicalDeviceMultisamplePropertiesEXT(physical_device, multisample_state->rasterizationSamples, &multisample_prop); const VkExtent2D max_grid_size = multisample_prop.maxSampleLocationGridSize; // Note order of "divide" in "sampleLocationsInfo must evenly divide VkMultisamplePropertiesEXT" if (SafeModulo(max_grid_size.width, grid_size.width) != 0) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521", "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location " "and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.width (%u) " "must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.width (%u).", pipelineIndex, grid_size.width, max_grid_size.width); } if (SafeModulo(max_grid_size.height, grid_size.height) != 0) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522", "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location "
"and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationGridSize.height (%u) " "must be evenly divided by VkMultisamplePropertiesEXT::sampleLocationGridSize.height (%u).", pipelineIndex, grid_size.height, max_grid_size.height); } if (sample_location_info.sampleLocationsPerPixel != multisample_state->rasterizationSamples) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01523", "vkCreateGraphicsPipelines() pCreateInfo[%u]: Because there is no dynamic state for Sample Location " "and sampleLocationEnable is true, the " "VkPipelineSampleLocationsStateCreateInfoEXT::sampleLocationsInfo::sampleLocationsPerPixel (%s) must " "be the same as the VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%s).", pipelineIndex, string_VkSampleCountFlagBits(sample_location_info.sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } } if (device_extensions.vk_qcom_render_pass_shader_resolve) { uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline)); uint32_t subpass_input_attachment_samples = 0; for (uint32_t i = 0; i < subpass_desc->inputAttachmentCount; i++) { const auto attachment = subpass_desc->pInputAttachments[i].attachment; if (attachment != VK_ATTACHMENT_UNUSED) { subpass_input_attachment_samples |= static_cast<uint32_t>(pPipeline->rp_state->createInfo.pAttachments[attachment].samples); } } if ((subpass_desc->flags & VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM) != 0) { if (raster_samples != subpass_input_attachment_samples) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-rasterizationSamples-04899", "vkCreateGraphicsPipelines() pCreateInfo[%u]: The subpass includes " "VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM " "but the input attachment VkSampleCountFlagBits (%u) does not match the " "VkPipelineMultisampleStateCreateInfo::rasterizationSamples (%u) VkSampleCountFlagBits.", pipelineIndex, subpass_input_attachment_samples, multisample_state->rasterizationSamples); } if (multisample_state->sampleShadingEnable == VK_TRUE) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-sampleShadingEnable-04900", "vkCreateGraphicsPipelines() pCreateInfo[%u]: The subpass includes " "VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM " "which requires sample shading to be disabled, but " "VkPipelineMultisampleStateCreateInfo::sampleShadingEnable is true. 
", pipelineIndex); } } } } skip |= ValidatePipelineCacheControlFlags(pPipeline->graphicsPipelineCI.flags, pipelineIndex, "vkCreateGraphicsPipelines", "VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878"); // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378 if (!enabled_features.extended_dynamic_state_features.extendedDynamicState && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_CULL_MODE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRONT_FACE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_TOPOLOGY_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_BINDING_STRIDE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_WRITE_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_COMPARE_OP_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BOUNDS_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_TEST_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_STENCIL_OP_EXT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03378", "vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState feature is not enabled"); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2 && (IsDynamic(pPipeline, VK_DYNAMIC_STATE_RASTERIZER_DISCARD_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_DEPTH_BIAS_ENABLE_EXT) || IsDynamic(pPipeline, VK_DYNAMIC_STATE_PRIMITIVE_RESTART_ENABLE_EXT))) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04868", "vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState2 feature is not enabled"); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp && IsDynamic(pPipeline, VK_DYNAMIC_STATE_LOGIC_OP_EXT)) { skip |= LogError( device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04869", "vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState2LogicOp feature is not enabled"); } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870 if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints && IsDynamic(pPipeline, VK_DYNAMIC_STATE_PATCH_CONTROL_POINTS_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04870", "vkCreateGraphicsPipelines: Extended dynamic state used by the extendedDynamicState2PatchControlPoints " "feature is not enabled"); } const VkPipelineFragmentShadingRateStateCreateInfoKHR *fragment_shading_rate_state = LvlFindInChain<VkPipelineFragmentShadingRateStateCreateInfoKHR>(pPipeline->graphicsPipelineCI.pNext); if (fragment_shading_rate_state && !IsDynamic(pPipeline, VK_DYNAMIC_STATE_FRAGMENT_SHADING_RATE_KHR)) { const char *struct_name = "VkPipelineFragmentShadingRateStateCreateInfoKHR"; if (fragment_shading_rate_state->fragmentSize.width == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494", "vkCreateGraphicsPipelines: Fragment width of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height == 0) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495", "vkCreateGraphicsPipelines: Fragment height of 
%u has been specified in %s.", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.width)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496", "vkCreateGraphicsPipelines: Non-power-of-two fragment width of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height != 0 && !IsPowerOfTwo(fragment_shading_rate_state->fragmentSize.height)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497", "vkCreateGraphicsPipelines: Non-power-of-two fragment height of %u has been specified in %s.", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (fragment_shading_rate_state->fragmentSize.width > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498", "vkCreateGraphicsPipelines: Fragment width of %u specified in %s is too large.", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (fragment_shading_rate_state->fragmentSize.height > 4) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499", "vkCreateGraphicsPipelines: Fragment height of %u specified in %s is too large", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.width != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", fragment_shading_rate_state->fragmentSize.width, struct_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && fragment_shading_rate_state->fragmentSize.height != 1) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500", "vkCreateGraphicsPipelines: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", fragment_shading_rate_state->fragmentSize.height, struct_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501", "vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502", "vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[0] != 
VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[0]), struct_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && fragment_shading_rate_state->combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506", "vkCreateGraphicsPipelines: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is not supported", string_VkFragmentShadingRateCombinerOpKHR(fragment_shading_rate_state->combinerOps[1]), struct_name); } } // VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807 if (!enabled_features.vertex_input_dynamic_state_features.vertexInputDynamicState && IsDynamic(pPipeline, VK_DYNAMIC_STATE_VERTEX_INPUT_EXT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-04807", "The vertexInputDynamicState feature must be enabled to use the VK_DYNAMIC_STATE_VERTEX_INPUT_EXT dynamic state"); } return skip; } // Block of code at start here specifically for managing/tracking DSs // Validate that given set is valid and that it's not being used by an in-flight CmdBuffer // func_str is the name of the calling function // Return false if no errors occur // Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain) bool CoreChecks::ValidateIdleDescriptorSet(VkDescriptorSet set, const char *func_str) const { if (disabled[object_in_use]) return false; bool skip = false; auto set_node = setMap.find(set); if (set_node != setMap.end()) { // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here if (set_node->second->InUse()) { skip |= LogError(set, "VUID-vkFreeDescriptorSets-pDescriptorSets-00309", "Cannot call %s() on %s that is in use by a command buffer.", func_str, report_data->FormatHandle(set).c_str()); } } return skip; } // If a renderpass is active, verify that the given command type is appropriate for current subpass state bool CoreChecks::ValidateCmdSubpassState(const CMD_BUFFER_STATE *pCB, const CMD_TYPE cmd_type) const { if (!pCB->activeRenderPass) return false; bool skip = false; if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS && (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS && cmd_type != CMD_NEXTSUBPASS2 && cmd_type != CMD_ENDRENDERPASS2)) { skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidCommandBuffer, "Commands cannot be called in a subpass using secondary command buffers."); } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) { skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidCommandBuffer, "vkCmdExecuteCommands() cannot be called in a subpass using inline commands."); } return skip; } bool CoreChecks::ValidateCmdQueueFlags(const CMD_BUFFER_STATE *cb_node, const char *caller_name, VkQueueFlags required_flags, const char *error_code) const { auto 
pool = cb_node->command_pool.get(); if (pool) { const uint32_t queue_family_index = pool->queueFamilyIndex; const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags; if (!(required_flags & queue_flags)) { string required_flags_string; for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT, VK_QUEUE_SPARSE_BINDING_BIT, VK_QUEUE_PROTECTED_BIT}) { if (flag & required_flags) { if (required_flags_string.size()) { required_flags_string += " or "; } required_flags_string += string_VkQueueFlagBits(flag); } } return LogError(cb_node->commandBuffer(), error_code, "%s(): Called in command buffer %s which was allocated from the command pool %s which was created with " "queueFamilyIndex %u which doesn't contain the required %s capability flags.", caller_name, report_data->FormatHandle(cb_node->commandBuffer()).c_str(), report_data->FormatHandle(pool->commandPool()).c_str(), queue_family_index, required_flags_string.c_str()); } } return false; } bool CoreChecks::ValidateSampleLocationsInfo(const VkSampleLocationsInfoEXT *pSampleLocationsInfo, const char *apiName) const { bool skip = false; const VkSampleCountFlagBits sample_count = pSampleLocationsInfo->sampleLocationsPerPixel; const uint32_t sample_total_size = pSampleLocationsInfo->sampleLocationGridSize.width * pSampleLocationsInfo->sampleLocationGridSize.height * SampleCountSize(sample_count); if (pSampleLocationsInfo->sampleLocationsCount != sample_total_size) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsCount-01527", "%s: VkSampleLocationsInfoEXT::sampleLocationsCount (%u) must equal grid width * grid height * pixel " "sample rate which currently is (%u * %u * %u).", apiName, pSampleLocationsInfo->sampleLocationsCount, pSampleLocationsInfo->sampleLocationGridSize.width, pSampleLocationsInfo->sampleLocationGridSize.height, SampleCountSize(sample_count)); } if ((phys_dev_ext_props.sample_locations_props.sampleLocationSampleCounts & sample_count) == 0) { skip |= LogError(device, "VUID-VkSampleLocationsInfoEXT-sampleLocationsPerPixel-01526", "%s: VkSampleLocationsInfoEXT::sampleLocationsPerPixel of %s is not supported by the device, please check " "VkPhysicalDeviceSampleLocationsPropertiesEXT::sampleLocationSampleCounts for valid sample counts.", apiName, string_VkSampleCountFlagBits(sample_count)); } return skip; } static char const *GetCauseStr(VulkanTypedHandle obj) { if (obj.type == kVulkanObjectTypeDescriptorSet) return "destroyed or updated"; if (obj.type == kVulkanObjectTypeCommandBuffer) return "destroyed or rerecorded"; return "destroyed"; } bool CoreChecks::ReportInvalidCommandBuffer(const CMD_BUFFER_STATE *cb_state, const char *call_source) const { bool skip = false; for (const auto& entry: cb_state->broken_bindings) { const auto& obj = entry.first; const char *cause_str = GetCauseStr(obj); string vuid; std::ostringstream str; str << kVUID_Core_DrawState_InvalidCommandBuffer << "-" << object_string[obj.type]; vuid = str.str(); auto objlist = entry.second; //intentional copy objlist.add(cb_state->commandBuffer()); skip |= LogError(objlist, vuid, "You are adding %s to %s that is invalid because bound %s was %s.", call_source, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(obj).c_str(), cause_str); } return skip; } bool CoreChecks::ValidateIndirectCmd(VkCommandBuffer command_buffer, VkBuffer buffer, CMD_TYPE cmd_type, const char *caller_name) const { bool skip = false; const 
DrawDispatchVuid vuid = GetDrawDispatchVuid(cmd_type); const CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer); const BUFFER_STATE *buffer_state = GetBufferState(buffer); if ((cb_state != nullptr) && (buffer_state != nullptr)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, caller_name, vuid.indirect_contiguous_memory); skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT, true, vuid.indirect_buffer_bit, caller_name, "VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT"); if (cb_state->unprotected == false) { skip |= LogError(cb_state->commandBuffer(), vuid.indirect_protected_cb, "%s: Indirect commands can't be used in protected command buffers.", caller_name); } } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToPhysicalDeviceCount(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; uint32_t count = 1 << physical_device_count; if (count <= deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is invalid. Physical device count is %" PRIu32 ".", deviceMask, physical_device_count); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToZero(uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if (deviceMask == 0) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") must be non-zero.", deviceMask); } return skip; } template <typename T1> bool CoreChecks::ValidateDeviceMaskToCommandBuffer(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const T1 object, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->initial_device_mask) != deviceMask) { skip |= LogError(object, VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s initial device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->commandBuffer()).c_str(), pCB->initial_device_mask); } return skip; } bool CoreChecks::ValidateDeviceMaskToRenderPass(const CMD_BUFFER_STATE *pCB, uint32_t deviceMask, const char *VUID) const { bool skip = false; if ((deviceMask & pCB->active_render_pass_device_mask) != deviceMask) { skip |= LogError(pCB->commandBuffer(), VUID, "deviceMask(0x%" PRIx32 ") is not a subset of %s device mask(0x%" PRIx32 ").", deviceMask, report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str(), pCB->active_render_pass_device_mask); } return skip; } // Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a // render pass. bool CoreChecks::InsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const { bool inside = false; if (pCB->activeRenderPass) { inside = LogError(pCB->commandBuffer(), msgCode, "%s: It is invalid to issue this call inside an active %s.", apiName, report_data->FormatHandle(pCB->activeRenderPass->renderPass()).c_str()); } return inside; } // Flags validation error if the associated call is made outside a render pass. The apiName // routine should ONLY be called inside a render pass. 
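// Note: a secondary command buffer recorded with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
// is treated as being inside a render pass even though it has no activeRenderPass of its own, so
// it is exempt from this check.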
bool CoreChecks::OutsideRenderPass(const CMD_BUFFER_STATE *pCB, const char *apiName, const char *msgCode) const { bool outside = false; if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) || ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) { outside = LogError(pCB->commandBuffer(), msgCode, "%s: This call must be issued inside an active render pass.", apiName); } return outside; } bool CoreChecks::ValidateQueueFamilyIndex(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t requested_queue_family, const char *err_code, const char *cmd_name, const char *queue_family_var_name) const { bool skip = false; if (requested_queue_family >= pd_state->queue_family_known_count) { const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; skip |= LogError(pd_state->phys_device, err_code, "%s: %s (= %" PRIu32 ") is not less than any previously obtained pQueueFamilyPropertyCount from " "vkGetPhysicalDeviceQueueFamilyProperties%s (i.e. is not less than %s).", cmd_name, queue_family_var_name, requested_queue_family, conditional_ext_cmd, std::to_string(pd_state->queue_family_known_count).c_str()); } return skip; } // Verify VkDeviceQueueCreateInfos bool CoreChecks::ValidateDeviceQueueCreateInfos(const PHYSICAL_DEVICE_STATE *pd_state, uint32_t info_count, const VkDeviceQueueCreateInfo *infos) const { bool skip = false; const uint32_t not_used = std::numeric_limits<uint32_t>::max(); struct create_flags { // uint32_t is to represent the queue family index to allow for better error messages uint32_t unprotected_index; uint32_t protected_index; create_flags(uint32_t a, uint32_t b) : unprotected_index(a), protected_index(b) {} }; layer_data::unordered_map<uint32_t, create_flags> queue_family_map; for (uint32_t i = 0; i < info_count; ++i) { const auto requested_queue_family = infos[i].queueFamilyIndex; std::string queue_family_var_name = "pCreateInfo->pQueueCreateInfos[" + std::to_string(i) + "].queueFamilyIndex"; skip |= ValidateQueueFamilyIndex(pd_state, requested_queue_family, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381", "vkCreateDevice", queue_family_var_name.c_str()); if (api_version == VK_API_VERSION_1_0) { // Vulkan 1.0 didn't have protected memory so always needed unique info create_flags flags = {requested_queue_family, not_used}; if (queue_family_map.emplace(requested_queue_family, flags).second == false) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-00372", "CreateDevice(): %s (=%" PRIu32 ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d].", queue_family_var_name.c_str(), requested_queue_family, queue_family_map.at(requested_queue_family).unprotected_index); } } else { // Vulkan 1.1 and up can have 2 queues with the same family index if one is protected and one isn't auto it = queue_family_map.find(requested_queue_family); if (it == queue_family_map.end()) { // Add first time seeing queue family index and what the create flags were create_flags new_flags = {not_used, not_used}; if ((infos[i].flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) != 0) { new_flags.protected_index = requested_queue_family; } else { new_flags.unprotected_index = requested_queue_family; } queue_family_map.emplace(requested_queue_family, new_flags); } else { // The queue family was seen, so now need to make sure the flags were different
if ((infos[i].flags & VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT) != 0) { if (it->second.protected_index != not_used) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802", "CreateDevice(): %s (=%" PRIu32 ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d] which both have " "VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT.", queue_family_var_name.c_str(), requested_queue_family, queue_family_map.at(requested_queue_family).protected_index); } else { it->second.protected_index = requested_queue_family; } } else { if (it->second.unprotected_index != not_used) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-queueFamilyIndex-02802", "CreateDevice(): %s (=%" PRIu32 ") is not unique and was also used in pCreateInfo->pQueueCreateInfos[%d].", queue_family_var_name.c_str(), requested_queue_family, queue_family_map.at(requested_queue_family).unprotected_index); } else { it->second.unprotected_index = requested_queue_family; } } } } // Verify that requested queue count of queue family is known to be valid at this point in time if (requested_queue_family < pd_state->queue_family_known_count) { const auto requested_queue_count = infos[i].queueCount; const bool queue_family_has_props = requested_queue_family < pd_state->queue_family_properties.size(); // spec guarantees at least one queue for each queue family const uint32_t available_queue_count = queue_family_has_props ? pd_state->queue_family_properties[requested_queue_family].queueCount : 1; const char *conditional_ext_cmd = instance_extensions.vk_khr_get_physical_device_properties_2 ? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]" : ""; if (requested_queue_count > available_queue_count) { const std::string count_note = queue_family_has_props ? "i.e. "
"is not less than or equal to " + std::to_string(pd_state->queue_family_properties[requested_queue_family].queueCount) : "the pQueueFamilyProperties[" + std::to_string(requested_queue_family) + "] was never obtained"; skip |= LogError( pd_state->phys_device, "VUID-VkDeviceQueueCreateInfo-queueCount-00382", "vkCreateDevice: pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueCount (=%" PRIu32 ") is not less than or equal to available queue count for this pCreateInfo->pQueueCreateInfos[%" PRIu32 "].queueFamilyIndex (=%" PRIu32 ") obtained previously from vkGetPhysicalDeviceQueueFamilyProperties%s (%s).", i, requested_queue_count, i, requested_queue_family, conditional_ext_cmd, count_note.c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) const { bool skip = false; auto pd_state = GetPhysicalDeviceState(gpu); // TODO: object_tracker should perhaps do this instead // and it does not seem to currently work anyway -- the loader just crashes before this point if (!pd_state) { skip |= LogError(device, kVUID_Core_DevLimit_MustQueryCount, "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices()."); } else { skip |= ValidateDeviceQueueCreateInfos(pd_state, pCreateInfo->queueCreateInfoCount, pCreateInfo->pQueueCreateInfos); const VkPhysicalDeviceFragmentShadingRateFeaturesKHR *fragment_shading_rate_features = LvlFindInChain<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(pCreateInfo->pNext); if (fragment_shading_rate_features) { const VkPhysicalDeviceShadingRateImageFeaturesNV *shading_rate_image_features = LvlFindInChain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext); if (shading_rate_image_features && shading_rate_image_features->shadingRateImage) { if (fragment_shading_rate_features->pipelineFragmentShadingRate) { skip |= LogError( pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04478", "vkCreateDevice: Cannot enable shadingRateImage and pipelineFragmentShadingRate features simultaneously."); } if (fragment_shading_rate_features->primitiveFragmentShadingRate) { skip |= LogError( pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04479", "vkCreateDevice: Cannot enable shadingRateImage and primitiveFragmentShadingRate features simultaneously."); } if (fragment_shading_rate_features->attachmentFragmentShadingRate) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-shadingRateImage-04480", "vkCreateDevice: Cannot enable shadingRateImage and attachmentFragmentShadingRate features " "simultaneously."); } } const VkPhysicalDeviceFragmentDensityMapFeaturesEXT *fragment_density_map_features = LvlFindInChain<VkPhysicalDeviceFragmentDensityMapFeaturesEXT>(pCreateInfo->pNext); if (fragment_density_map_features && fragment_density_map_features->fragmentDensityMap) { if (fragment_shading_rate_features->pipelineFragmentShadingRate) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04481", "vkCreateDevice: Cannot enable fragmentDensityMap and pipelineFragmentShadingRate features " "simultaneously."); } if (fragment_shading_rate_features->primitiveFragmentShadingRate) { skip |= LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04482", "vkCreateDevice: Cannot enable fragmentDensityMap and primitiveFragmentShadingRate features " "simultaneously."); } if (fragment_shading_rate_features->attachmentFragmentShadingRate) { skip |=
LogError(pd_state->phys_device, "VUID-VkDeviceCreateInfo-fragmentDensityMap-04483", "vkCreateDevice: Cannot enable fragmentDensityMap and attachmentFragmentShadingRate features " "simultaneously."); } } } } return skip; } void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) { // The state tracker sets up the device state StateTracker::PostCallRecordCreateDevice(gpu, pCreateInfo, pAllocator, pDevice, result); // Add the callback hooks for the functions that are either broadly or deeply used and that the ValidationStateTracker refactor // would be messier without. // TODO: Find a good way to do this hooklessly. ValidationObject *device_object = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map); ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation); CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data); core_checks->SetSetImageViewInitialLayoutCallback( [core_checks](CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &iv_state, VkImageLayout layout) -> void { core_checks->SetImageViewInitialLayout(cb_node, iv_state, layout); }); // Allocate shader validation cache if (!disabled[shader_validation_caching] && !disabled[shader_validation] && !core_checks->core_validation_cache) { std::string validation_cache_path; auto tmp_path = GetEnvironment("TMPDIR"); if (!tmp_path.size()) tmp_path = GetEnvironment("TMP"); if (!tmp_path.size()) tmp_path = GetEnvironment("TEMP"); if (!tmp_path.size()) tmp_path = "//tmp"; core_checks->validation_cache_path = tmp_path + "//shader_validation_cache.bin"; std::vector<char> validation_cache_data; std::ifstream read_file(core_checks->validation_cache_path.c_str(), std::ios::in | std::ios::binary); if (read_file) { std::copy(std::istreambuf_iterator<char>(read_file), {}, std::back_inserter(validation_cache_data)); read_file.close(); } else { LogInfo(core_checks->device, "VUID-NONE", "Cannot open shader validation cache at %s for reading (it may not exist yet)", core_checks->validation_cache_path.c_str()); } VkValidationCacheCreateInfoEXT cacheCreateInfo = {}; cacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT; cacheCreateInfo.pNext = NULL; cacheCreateInfo.initialDataSize = validation_cache_data.size(); cacheCreateInfo.pInitialData = validation_cache_data.data(); cacheCreateInfo.flags = 0; CoreLayerCreateValidationCacheEXT(*pDevice, &cacheCreateInfo, nullptr, &core_checks->core_validation_cache); } } void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { if (!device) return; imageLayoutMap.clear(); StateTracker::PreCallRecordDestroyDevice(device, pAllocator); if (core_validation_cache) { size_t validation_cache_size = 0; void *validation_cache_data = nullptr; CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, nullptr); validation_cache_data = (char *)malloc(sizeof(char) * validation_cache_size); if (!validation_cache_data) { LogInfo(device, "VUID-NONE", "Validation Cache Memory Error"); return; } VkResult result = CoreLayerGetValidationCacheDataEXT(device, core_validation_cache, &validation_cache_size, validation_cache_data); if (result != VK_SUCCESS) { LogInfo(device, "VUID-NONE", "Validation Cache Retrieval Error"); return; } FILE *write_file = fopen(validation_cache_path.c_str(), "wb"); if (write_file) { fwrite(validation_cache_data, 
sizeof(char), validation_cache_size, write_file); fclose(write_file); } else { LogInfo(device, "VUID-NONE", "Cannot open shader validation cache at %s for writing", validation_cache_path.c_str()); } free(validation_cache_data); CoreLayerDestroyValidationCacheEXT(device, core_validation_cache, NULL); } } bool CoreChecks::ValidateStageMaskHost(const Location &loc, VkPipelineStageFlags2KHR stageMask) const { bool skip = false; if ((stageMask & VK_PIPELINE_STAGE_HOST_BIT) != 0) { const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, sync_vuid_maps::SubmitError::kHostStageMask); skip |= LogError( device, vuid, "%s stage mask must not include VK_PIPELINE_STAGE_HOST_BIT as the stage can't be invoked inside a command buffer.", loc.Message().c_str()); } return skip; } // Note: This function assumes that the global lock is held by the calling thread. // For the given queue, verify the queue state up to the given seq number. // Currently the only check is to make sure that if there are events to be waited on prior to // a QueryReset, make sure that all such events have been signalled. bool CoreChecks::VerifyQueueStateToSeq(const QUEUE_STATE *initial_queue, uint64_t initial_seq) const { bool skip = false; // sequence number we want to validate up to, per queue layer_data::unordered_map<const QUEUE_STATE *, uint64_t> target_seqs{{initial_queue, initial_seq}}; // sequence number we've completed validation for, per queue layer_data::unordered_map<const QUEUE_STATE *, uint64_t> done_seqs; std::vector<const QUEUE_STATE *> worklist{initial_queue}; while (worklist.size()) { auto queue = worklist.back(); worklist.pop_back(); auto target_seq = target_seqs[queue]; auto seq = std::max(done_seqs[queue], queue->seq); auto sub_it = queue->submissions.begin() + int(seq - queue->seq); // seq >= queue->seq for (; seq < target_seq; ++sub_it, ++seq) { for (auto &wait : sub_it->waitSemaphores) { auto other_queue = GetQueueState(wait.queue); if (other_queue == queue) continue; // semaphores /always/ point backwards, so no point here. auto other_target_seq = std::max(target_seqs[other_queue], wait.seq); auto other_done_seq = std::max(done_seqs[other_queue], other_queue->seq); // if this wait is for another queue, and covers new sequence // numbers beyond what we've already validated, mark the new // target seq and (possibly-re)add the queue to the worklist. if (other_done_seq < other_target_seq) { target_seqs[other_queue] = other_target_seq; worklist.push_back(other_queue); } } } // finally mark the point we've now validated this queue to. 
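// (Recording the completed sequence number here keeps the traversal linear: if another semaphore
// wait pushes this queue back onto the worklist, validation resumes from this point instead of
// re-checking earlier submissions.)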
done_seqs[queue] = seq; } return skip; } // When the given fence is retired, verify outstanding queue operations through the point of the fence bool CoreChecks::VerifyQueueStateToFence(VkFence fence) const { auto fence_state = GetFenceState(fence); if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) { return VerifyQueueStateToSeq(GetQueueState(fence_state->signaler.first), fence_state->signaler.second); } return false; } bool CoreChecks::ValidateCommandBufferSimultaneousUse(const Location &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; if ((pCB->InUse() || current_submit_count > 1) && !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { const auto &vuid = sync_vuid_maps::GetQueueSubmitVUID(loc, SubmitError::kCmdNotSimultaneous); skip |= LogError(device, vuid, "%s %s is already in use and is not marked for simultaneous use.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str()); } return skip; } bool CoreChecks::ValidateCommandBufferState(const CMD_BUFFER_STATE *cb_state, const char *call_source, int current_submit_count, const char *vu_id) const { bool skip = false; if (disabled[command_buffer_state]) return skip; // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once if ((cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (cb_state->submitCount + current_submit_count > 1)) { skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_CommandBufferSingleSubmitViolation, "%s was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted 0x%" PRIxLEAST64 " times.", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), cb_state->submitCount + current_submit_count); } // Validate that cmd buffers have been updated switch (cb_state->state) { case CB_INVALID_INCOMPLETE: case CB_INVALID_COMPLETE: skip |= ReportInvalidCommandBuffer(cb_state, call_source); break; case CB_NEW: skip |= LogError(cb_state->commandBuffer(), vu_id, "%s used in the call to %s is unrecorded and contains no commands.", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source); break; case CB_RECORDING: skip |= LogError(cb_state->commandBuffer(), kVUID_Core_DrawState_NoEndCommandBuffer, "You must call vkEndCommandBuffer() on %s before this call to %s!", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), call_source); break; default: /* recorded */ break; } return skip; } // Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices bool CoreChecks::ValidImageBufferQueue(const CMD_BUFFER_STATE *cb_node, const VulkanTypedHandle &object, uint32_t queueFamilyIndex, uint32_t count, const uint32_t *indices) const { bool found = false; bool skip = false; for (uint32_t i = 0; i < count; i++) { if (indices[i] == queueFamilyIndex) { found = true; break; } } if (!found) { LogObjectList objlist(cb_node->commandBuffer()); objlist.add(object); skip = LogError(objlist, "VUID-vkQueueSubmit-pSubmits-04626", "vkQueueSubmit: %s contains %s which was not created allowing concurrent access to " "this queue family %d.", report_data->FormatHandle(cb_node->commandBuffer()).c_str(), report_data->FormatHandle(object).c_str(), queueFamilyIndex); } return skip; } // Validate that queueFamilyIndices of primary command buffers match this queue // Secondary command buffers were previously validated 
in vkCmdExecuteCommands(). bool CoreChecks::ValidateQueueFamilyIndices(const Location &loc, const CMD_BUFFER_STATE *pCB, VkQueue queue) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; auto pool = pCB->command_pool.get(); auto queue_state = GetQueueState(queue); if (pool && queue_state) { if (pool->queueFamilyIndex != queue_state->queueFamilyIndex) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(queue); const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kCmdWrongQueueFamily); skip |= LogError(objlist, vuid, "%s Primary %s created in queue family %d is being submitted on %s " "from queue family %d.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(), pool->queueFamilyIndex, report_data->FormatHandle(queue).c_str(), queue_state->queueFamilyIndex); } // Ensure that any bound images or buffers created with SHARING_MODE_CONCURRENT have access to the current queue family for (const auto &object : pCB->object_bindings) { if (object.type == kVulkanObjectTypeImage) { auto image_state = object.node ? (IMAGE_STATE *)object.node : GetImageState(object.Cast<VkImage>()); if (image_state && image_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex, image_state->createInfo.queueFamilyIndexCount, image_state->createInfo.pQueueFamilyIndices); } } else if (object.type == kVulkanObjectTypeBuffer) { auto buffer_state = object.node ? (BUFFER_STATE *)object.node : GetBufferState(object.Cast<VkBuffer>()); if (buffer_state && buffer_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) { skip |= ValidImageBufferQueue(pCB, object, queue_state->queueFamilyIndex, buffer_state->createInfo.queueFamilyIndexCount, buffer_state->createInfo.pQueueFamilyIndices); } } } } return skip; } bool CoreChecks::ValidatePrimaryCommandBufferState( const Location &loc, const CMD_BUFFER_STATE *pCB, int current_submit_count, QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards, QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; // Track in-use for resources off of primary and any secondary CBs bool skip = false; if (pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdInSubmit); skip |= LogError(pCB->commandBuffer(), vuid, "%s Command buffer %s must be allocated with VK_COMMAND_BUFFER_LEVEL_PRIMARY.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str()); } else { for (const auto *sub_cb : pCB->linkedCommandBuffers) { skip |= ValidateQueuedQFOTransfers(sub_cb, qfo_image_scoreboards, qfo_buffer_scoreboards); // TODO: replace with InvalidateCommandBuffers() at recording. 
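// Illustrative failure mode (assumed scenario, not from a spec example): record secondary S into primary P1, then re-record S into primary P2 without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT; submitting P1 afterwards should trip the check below, since sub_cb->primaryCommandBuffer now names P2.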
if ((sub_cb->primaryCommandBuffer != pCB->commandBuffer()) && !(sub_cb->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { LogObjectList objlist(device); objlist.add(pCB->commandBuffer()); objlist.add(sub_cb->commandBuffer()); objlist.add(sub_cb->primaryCommandBuffer); const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kSecondaryCmdNotSimultaneous); skip |= LogError(objlist, vuid, "%s %s was submitted with secondary %s but that buffer has subsequently been bound to " "primary %s and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb->primaryCommandBuffer).c_str()); } } } // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device skip |= ValidateCommandBufferSimultaneousUse(loc, pCB, current_submit_count); skip |= ValidateQueuedQFOTransfers(pCB, qfo_image_scoreboards, qfo_buffer_scoreboards); const char *vuid = loc.function == Func::vkQueueSubmit ? "VUID-vkQueueSubmit-pCommandBuffers-00072" : "VUID-vkQueueSubmit2KHR-commandBuffer-03876"; skip |= ValidateCommandBufferState(pCB, loc.StringFunc().c_str(), current_submit_count, vuid); return skip; } bool CoreChecks::ValidateFenceForSubmit(const FENCE_STATE *pFence, const char *inflight_vuid, const char *retired_vuid, const char *func_name) const { bool skip = false; if (pFence && pFence->scope == kSyncScopeInternal) { if (pFence->state == FENCE_INFLIGHT) { skip |= LogError(pFence->fence(), inflight_vuid, "%s: %s is already in use by another submission.", func_name, report_data->FormatHandle(pFence->fence()).c_str()); } else if (pFence->state == FENCE_RETIRED) { skip |= LogError(pFence->fence(), retired_vuid, "%s: %s submitted in SIGNALED state. Fences must be reset before being submitted.", func_name, report_data->FormatHandle(pFence->fence()).c_str()); } } return skip; } void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence, VkResult result) { StateTracker::PostCallRecordQueueSubmit(queue, submitCount, pSubmits, fence, result); if (result != VK_SUCCESS) return; // This triply nested loop duplicates the iteration done in the StateTracker, but avoids the need for two additional callbacks. for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferCount; i++) { auto cb_node = GetCBState(submit->pCommandBuffers[i]); if (cb_node) { for (auto *secondary_cmd_buffer : cb_node->linkedCommandBuffers) { UpdateCmdBufImageLayouts(secondary_cmd_buffer); RecordQueuedQFOTransfers(secondary_cmd_buffer); } UpdateCmdBufImageLayouts(cb_node); RecordQueuedQFOTransfers(cb_node); } } } } void CoreChecks::PostCallRecordQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence, VkResult result) { StateTracker::PostCallRecordQueueSubmit2KHR(queue, submitCount, pSubmits, fence, result); if (result != VK_SUCCESS) return; // This triply nested loop duplicates the iteration done in the StateTracker, but avoids the need for two additional callbacks. 
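// Mirrors PostCallRecordQueueSubmit above for the synchronization2 path: the same layout/QFO bookkeeping, but command buffers arrive as pSubmits[i].pCommandBufferInfos[j].commandBuffer rather than pSubmits[i].pCommandBuffers[j].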
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx]; for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) { auto cb_node = GetCBState(submit->pCommandBufferInfos[i].commandBuffer); if (cb_node) { for (auto *secondaryCmdBuffer : cb_node->linkedCommandBuffers) { UpdateCmdBufImageLayouts(secondaryCmdBuffer); RecordQueuedQFOTransfers(secondaryCmdBuffer); } UpdateCmdBufImageLayouts(cb_node); RecordQueuedQFOTransfers(cb_node); } } } } bool CoreChecks::SemaphoreWasSignaled(VkSemaphore semaphore) const { for (auto &pair : queueMap) { const QUEUE_STATE &queue_state = pair.second; for (const auto &submission : queue_state.submissions) { for (const auto &signal_semaphore : submission.signalSemaphores) { if (signal_semaphore.semaphore == semaphore) { return true; } } } } return false; } struct SemaphoreSubmitState { const CoreChecks *core; VkQueueFlags queue_flags; layer_data::unordered_set<VkSemaphore> signaled_semaphores; layer_data::unordered_set<VkSemaphore> unsignaled_semaphores; layer_data::unordered_set<VkSemaphore> internal_semaphores; SemaphoreSubmitState(const CoreChecks *core_, VkQueueFlags queue_flags_) : core(core_), queue_flags(queue_flags_) {} bool ValidateWaitSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value, uint32_t device_Index) { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; LogObjectList objlist(semaphore); objlist.add(queue); const auto *pSemaphore = core->GetSemaphoreState(semaphore); if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && !(pSemaphore->signaled) && !core->SemaphoreWasSignaled(semaphore))) { auto error = core->device_extensions.vk_khr_timeline_semaphore ? SubmitError::kTimelineCannotBeSignalled : SubmitError::kBinaryCannotBeSignalled; const auto &vuid = GetQueueSubmitVUID(loc, error); skip |= core->LogError( objlist, pSemaphore->scope == kSyncScopeInternal ? 
vuid : kVUID_Core_DrawState_QueueForwardProgress, "%s Queue %s is waiting on semaphore (%s) that has no way to be signaled.", loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str()); } else { signaled_semaphores.erase(semaphore); unsignaled_semaphores.insert(semaphore); } } if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && pSemaphore->scope == kSyncScopeExternalTemporary) { internal_semaphores.insert(semaphore); } return skip; } bool ValidateSignalSemaphore(const core_error::Location &loc, VkQueue queue, VkSemaphore semaphore, uint64_t value, uint32_t deviceIndex) { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; LogObjectList objlist(semaphore); objlist.add(queue); const auto *pSemaphore = core->GetSemaphoreState(semaphore); if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_TIMELINE_KHR && value <= pSemaphore->payload) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemSmallValue); skip |= core->LogError(objlist, vuid, "%s signal value (0x%" PRIx64 ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64 ")", loc.Message().c_str(), value, core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str(), pSemaphore->payload); } if (pSemaphore && pSemaphore->type == VK_SEMAPHORE_TYPE_BINARY_KHR && (pSemaphore->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (signaled_semaphores.count(semaphore) || (!(unsignaled_semaphores.count(semaphore)) && pSemaphore->signaled)) { objlist.add(pSemaphore->signaler.first); skip |= core->LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress, "%s is signaling %s (%s) that was previously " "signaled by %s but has not since been waited on by any queue.", loc.Message().c_str(), core->report_data->FormatHandle(queue).c_str(), core->report_data->FormatHandle(semaphore).c_str(), core->report_data->FormatHandle(pSemaphore->signaler.first).c_str()); } else { unsignaled_semaphores.erase(semaphore); signaled_semaphores.insert(semaphore); } } return skip; } }; bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo *submit, const Location &outer_loc) const { bool skip = false; auto *timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext); for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { uint64_t value = 0; uint32_t device_index = 0; // TODO: VkSemaphore semaphore = submit->pWaitSemaphores[i]; LogObjectList objlist(semaphore); objlist.add(queue); if (submit->pWaitDstStageMask) { auto loc = outer_loc.dot(Field::pWaitDstStageMask, i); skip |= ValidatePipelineStage(objlist, loc, state.queue_flags, submit->pWaitDstStageMask[i]); skip |= ValidateStageMaskHost(loc, submit->pWaitDstStageMask[i]); } const auto *semaphore_state = GetSemaphoreState(semaphore); if (!semaphore_state) { continue; } auto loc = outer_loc.dot(Field::pWaitSemaphores, i); if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) { if (timeline_semaphore_submit_info == nullptr) { skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239", "%s (%s) is a timeline semaphore, but VkSubmitInfo does " "not include an instance of VkTimelineSemaphoreSubmitInfo", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str()); continue; } else if (submit->waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) { skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03240", "%s (%s) is a timeline semaphore, it contains an " "instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different than " "waitSemaphoreCount (%u)", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->waitSemaphoreValueCount, submit->waitSemaphoreCount); continue; } value = timeline_semaphore_submit_info->pWaitSemaphoreValues[i]; } skip |= state.ValidateWaitSemaphore(outer_loc.dot(Field::pWaitSemaphores, i), queue, semaphore, value, device_index); } for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; uint64_t value = 0; uint32_t device_index = 0; const auto *semaphore_state = GetSemaphoreState(semaphore); if (!semaphore_state) { continue; } auto loc = outer_loc.dot(Field::pSignalSemaphores, i); if (semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE) { if (timeline_semaphore_submit_info == nullptr) { skip |= LogError(semaphore, "VUID-VkSubmitInfo-pWaitSemaphores-03239", "%s (%s) is a timeline semaphore, but VkSubmitInfo does " "not include an instance of VkTimelineSemaphoreSubmitInfo", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str()); continue; } else if (submit->signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) { skip |= LogError(semaphore, "VUID-VkSubmitInfo-pNext-03241", "%s (%s) is a timeline semaphore, it contains an " "instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different than " "signalSemaphoreCount (%u)", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->signalSemaphoreValueCount, submit->signalSemaphoreCount); continue; } value = timeline_semaphore_submit_info->pSignalSemaphoreValues[i]; } skip |= state.ValidateSignalSemaphore(loc, queue, semaphore, value, device_index); } return skip; } bool CoreChecks::ValidateSemaphoresForSubmit(SemaphoreSubmitState &state, VkQueue queue, const VkSubmitInfo2KHR *submit, const Location &outer_loc) const { bool skip = false; for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) { const auto &sem_info = submit->pWaitSemaphoreInfos[i]; Location loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i); skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags, sem_info.stageMask); skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask); skip |= state.ValidateWaitSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex); } for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) { const auto &sem_info = submit->pSignalSemaphoreInfos[i]; auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i); skip |= ValidatePipelineStage(LogObjectList(sem_info.semaphore), loc.dot(Field::stageMask), state.queue_flags, sem_info.stageMask); skip |= ValidateStageMaskHost(loc.dot(Field::stageMask), sem_info.stageMask); skip |= state.ValidateSignalSemaphore(loc, queue, sem_info.semaphore, sem_info.value, sem_info.deviceIndex); } return skip; } bool CoreChecks::ValidateMaxTimelineSemaphoreValueDifference(const Location &loc, VkSemaphore semaphore, uint64_t value) const { using sync_vuid_maps::GetQueueSubmitVUID; using sync_vuid_maps::SubmitError; bool skip = false; const auto semaphore_state = GetSemaphoreState(semaphore); if (!semaphore_state || semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) return false; uint64_t diff = value > semaphore_state->payload ? value - semaphore_state->payload : semaphore_state->payload - value; if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff); skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding current semaphore %s payload", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str()); } for (auto &pair : queueMap) { const QUEUE_STATE &queue_state = pair.second; for (const auto &submission : queue_state.submissions) { for (const auto &signal_semaphore : submission.signalSemaphores) { if (signal_semaphore.semaphore == semaphore) { diff = value > signal_semaphore.payload ? value - signal_semaphore.payload : signal_semaphore.payload - value; if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff); skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s signal value", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str()); } } } for (const auto &wait_semaphore : submission.waitSemaphores) { if (wait_semaphore.semaphore == semaphore) { diff = value > wait_semaphore.payload ? value - wait_semaphore.payload : wait_semaphore.payload - value; if (diff > phys_dev_props_core12.maxTimelineSemaphoreValueDifference) { const auto &vuid = GetQueueSubmitVUID(loc, SubmitError::kTimelineSemMaxDiff); skip |= LogError(semaphore, vuid, "%s value exceeds limit regarding pending semaphore %s wait value", loc.Message().c_str(), report_data->FormatHandle(semaphore).c_str()); } } } } } return skip; } struct CommandBufferSubmitState { const CoreChecks *core; const QUEUE_STATE *queue_state; QFOTransferCBScoreboards<QFOImageTransferBarrier> qfo_image_scoreboards; QFOTransferCBScoreboards<QFOBufferTransferBarrier> qfo_buffer_scoreboards; vector<VkCommandBuffer> current_cmds; GlobalImageLayoutMap overlay_image_layout_map; QueryMap local_query_to_state_map; EventToStageMap local_event_to_stage_map; CommandBufferSubmitState(const CoreChecks *c, const char *func, const QUEUE_STATE *q) : core(c), queue_state(q) {} bool Validate(const core_error::Location &loc, VkCommandBuffer cmd, uint32_t perf_pass) { bool skip = false; const auto *cb_node = core->GetCBState(cmd); if (cb_node == nullptr) { return skip; } skip |= core->ValidateCmdBufImageLayouts(cb_node, core->imageLayoutMap, overlay_image_layout_map); current_cmds.push_back(cmd); skip |= core->ValidatePrimaryCommandBufferState(loc, cb_node, static_cast<int>(std::count(current_cmds.begin(), current_cmds.end(), cmd)), &qfo_image_scoreboards, &qfo_buffer_scoreboards); skip |= core->ValidateQueueFamilyIndices(loc, cb_node, queue_state->queue); for (const auto &descriptor_set : cb_node->validate_descriptorsets_in_queuesubmit) { const cvdescriptorset::DescriptorSet *set_node = core->GetSetNode(descriptor_set.first); if (!set_node) { continue; } for (const auto &cmd_info : descriptor_set.second) { std::string function = loc.StringFunc(); function += ", "; function += cmd_info.function; for (const auto &binding_info : cmd_info.binding_infos) { std::string error; std::vector<uint32_t> dynamic_offsets; // dynamic data isn't allowed in UPDATE_AFTER_BIND, so dynamicOffsets is always empty. // This is submit time, not record time... 
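// Descriptor contents may legally change between recording and submission (e.g. UPDATE_AFTER_BIND), so bindings are checked against their submit-time state here; record_time_validate = false selects that mode in the shared descriptor validation helper.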
const bool record_time_validate = false; layer_data::optional<layer_data::unordered_map<VkImageView, VkImageLayout>> checked_layouts; if (set_node->GetTotalDescriptorCount() > cvdescriptorset::PrefilterBindRequestMap::kManyDescriptors_) { checked_layouts.emplace(); } skip |= core->ValidateDescriptorSetBindingData( cb_node, set_node, dynamic_offsets, binding_info, cmd_info.framebuffer, cmd_info.attachments.get(), cmd_info.subpasses.get(), record_time_validate, function.c_str(), core->GetDrawDispatchVuid(cmd_info.cmd_type), checked_layouts); } } } // Potential early exit here as bad object state may crash in delayed function calls if (skip) { return true; } // Call submit-time functions to validate or update local mirrors of state (to preserve const-ness at validate time) for (auto &function : cb_node->queue_submit_functions) { skip |= function(core, queue_state); } for (auto &function : cb_node->eventUpdates) { skip |= function(core, /*do_validate*/ true, &local_event_to_stage_map); } VkQueryPool first_perf_query_pool = VK_NULL_HANDLE; for (auto &function : cb_node->queryUpdates) { skip |= function(core, /*do_validate*/ true, first_perf_query_pool, perf_pass, &local_query_to_state_map); } return skip; } }; bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) const { const auto *fence_state = GetFenceState(fence); bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueSubmit-fence-00064", "VUID-vkQueueSubmit-fence-00063", "vkQueueSubmit()"); if (skip) { return true; } const auto queue_state = GetQueueState(queue); CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit()", queue_state); SemaphoreSubmitState sem_submit_state( this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags); // Now verify each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo *submit = &pSubmits[submit_idx]; const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext); uint32_t perf_pass = perf_submit ? 
perf_submit->counterPassIndex : 0; Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx); for (uint32_t i = 0; i < submit->commandBufferCount; i++) { skip |= cb_submit_state.Validate(loc.dot(Field::pCommandBuffers, i), submit->pCommandBuffers[i], perf_pass); } skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc); auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupSubmitInfo>(submit->pNext); if (chained_device_group_struct && chained_device_group_struct->commandBufferCount > 0) { for (uint32_t i = 0; i < chained_device_group_struct->commandBufferCount; ++i) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->pCommandBufferDeviceMasks[i], queue, "VUID-VkDeviceGroupSubmitInfo-pCommandBufferDeviceMasks-00086"); } } auto protected_submit_info = LvlFindInChain<VkProtectedSubmitInfo>(submit->pNext); if (protected_submit_info) { const bool protected_submit = protected_submit_info->protectedSubmit == VK_TRUE; // Only check feature once for submit if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) { skip |= LogError(queue, "VUID-VkProtectedSubmitInfo-protectedSubmit-01816", "vkQueueSubmit(): The protectedMemory device feature is disabled, can't submit a protected submission " "to %s pSubmits[%u]", report_data->FormatHandle(queue).c_str(), submit_idx); } // Make sure command buffers are all protected or unprotected for (uint32_t i = 0; i < submit->commandBufferCount; i++) { const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBuffers[i]); if (cb_state != nullptr) { if ((cb_state->unprotected == true) && (protected_submit == true)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(queue); skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04148", "vkQueueSubmit(): command buffer %s is unprotected while queue %s pSubmits[%u] has " "VkProtectedSubmitInfo::protectedSubmit set to VK_TRUE", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(queue).c_str(), submit_idx); } if ((cb_state->unprotected == false) && (protected_submit == false)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(queue); skip |= LogError(objlist, "VUID-VkSubmitInfo-pNext-04120", "vkQueueSubmit(): command buffer %s is protected while queue %s pSubmits[%u] has " "VkProtectedSubmitInfo::protectedSubmit set to VK_FALSE", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(queue).c_str(), submit_idx); } } } } } if (skip) return skip; // Now verify maxTimelineSemaphoreValueDifference for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { Location loc(Func::vkQueueSubmit, Struct::VkSubmitInfo, Field::pSubmits, submit_idx); const VkSubmitInfo *submit = &pSubmits[submit_idx]; auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(submit->pNext); if (info) { // If there are any timeline semaphores, this condition gets checked before the early return above if (info->waitSemaphoreValueCount) { for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pWaitSemaphores[i]; skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pWaitSemaphores, i), semaphore, info->pWaitSemaphoreValues[i]); } } // If there are any timeline semaphores, this condition gets checked before the early return above if (info->signalSemaphoreValueCount) { for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) { VkSemaphore semaphore = submit->pSignalSemaphores[i]; skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::pSignalSemaphores, i), semaphore, info->pSignalSemaphoreValues[i]); } } } } return skip; } bool CoreChecks::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR *pSubmits, VkFence fence) const { const auto *pFence = GetFenceState(fence); bool skip = ValidateFenceForSubmit(pFence, "VUID-vkQueueSubmit2KHR-fence-04895", "VUID-vkQueueSubmit2KHR-fence-04894", "vkQueueSubmit2KHR()"); if (skip) { return true; } const auto queue_state = GetQueueState(queue); CommandBufferSubmitState cb_submit_state(this, "vkQueueSubmit2KHR()", queue_state); SemaphoreSubmitState sem_submit_state( this, GetPhysicalDeviceState()->queue_family_properties[queue_state->queueFamilyIndex].queueFlags); // Now verify each individual submit for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx]; const auto perf_submit = LvlFindInChain<VkPerformanceQuerySubmitInfoKHR>(submit->pNext); uint32_t perf_pass = perf_submit ? perf_submit->counterPassIndex : 0; Location loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx); skip |= ValidateSemaphoresForSubmit(sem_submit_state, queue, submit, loc); bool protected_submit = (submit->flags & VK_SUBMIT_PROTECTED_BIT_KHR) != 0; // Only check feature once for submit if ((protected_submit == true) && (enabled_features.core11.protectedMemory == VK_FALSE)) { skip |= LogError(queue, "VUID-VkSubmitInfo2KHR-flags-03885", "vkQueueSubmit2KHR(): The protectedMemory device feature is disabled, can't submit a protected submission " "to %s pSubmits[%u]", report_data->FormatHandle(queue).c_str(), submit_idx); } for (uint32_t i = 0; i < submit->commandBufferInfoCount; i++) { auto info_loc = loc.dot(Field::pCommandBufferInfos, i); info_loc.structure = Struct::VkCommandBufferSubmitInfoKHR; skip |= cb_submit_state.Validate(info_loc.dot(Field::commandBuffer), submit->pCommandBufferInfos[i].commandBuffer, perf_pass); skip |= ValidateDeviceMaskToPhysicalDeviceCount(submit->pCommandBufferInfos[i].deviceMask, queue, "VUID-VkCommandBufferSubmitInfoKHR-deviceMask-03891"); // Make sure command buffers are all protected or unprotected const CMD_BUFFER_STATE *cb_state = GetCBState(submit->pCommandBufferInfos[i].commandBuffer); if (cb_state != nullptr) { if ((cb_state->unprotected == true) && (protected_submit == true)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(queue); skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03886", "vkQueueSubmit2KHR(): command buffer %s is unprotected while queue %s pSubmits[%u] has " "VK_SUBMIT_PROTECTED_BIT_KHR set", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(queue).c_str(), submit_idx); } if ((cb_state->unprotected == false) && (protected_submit == false)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(queue); skip |= LogError(objlist, "VUID-VkSubmitInfo2KHR-flags-03887", "vkQueueSubmit2KHR(): command buffer %s is protected while queue %s pSubmits[%u] has " "VK_SUBMIT_PROTECTED_BIT_KHR not set", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(queue).c_str(), submit_idx); } } } } if (skip) return skip; // Now verify maxTimelineSemaphoreValueDifference for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) { const VkSubmitInfo2KHR *submit = &pSubmits[submit_idx]; Location outer_loc(Func::vkQueueSubmit2KHR, Struct::VkSubmitInfo2KHR, Field::pSubmits, submit_idx);
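// Second pass, mirroring the VkSubmitInfo path above: it runs only if nothing else failed, and checks every timeline wait and signal value against the maxTimelineSemaphoreValueDifference limit.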
// If there are any timeline semaphores, this condition gets checked before the early return above for (uint32_t i = 0; i < submit->waitSemaphoreInfoCount; ++i) { const auto *sem_info = &submit->pWaitSemaphoreInfos[i]; auto loc = outer_loc.dot(Field::pWaitSemaphoreInfos, i); skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value); } // If there are any timeline semaphores, this condition gets checked before the early return above for (uint32_t i = 0; i < submit->signalSemaphoreInfoCount; ++i) { const auto *sem_info = &submit->pSignalSemaphoreInfos[i]; auto loc = outer_loc.dot(Field::pSignalSemaphoreInfos, i); skip |= ValidateMaxTimelineSemaphoreValueDifference(loc.dot(Field::semaphore), sem_info->semaphore, sem_info->value); } } return skip; } #ifdef AHB_VALIDATION_SUPPORT // Android-specific validation that uses types defined only on Android and only for NDK versions // that support the VK_ANDROID_external_memory_android_hardware_buffer extension. // This chunk could move into a separate core_validation_android.cpp file... ? // clang-format off // Map external format and usage flags to/from equivalent Vulkan flags // (Tables as of v1.1.92) // AHardwareBuffer Format Vulkan Format // ====================== ============= // AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM VK_FORMAT_R8G8B8A8_UNORM // AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM VK_FORMAT_R8G8B8A8_UNORM // AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM VK_FORMAT_R8G8B8_UNORM // AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM VK_FORMAT_R5G6B5_UNORM_PACK16 // AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT VK_FORMAT_R16G16B16A16_SFLOAT // AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM VK_FORMAT_A2B10G10R10_UNORM_PACK32 // AHARDWAREBUFFER_FORMAT_D16_UNORM VK_FORMAT_D16_UNORM // AHARDWAREBUFFER_FORMAT_D24_UNORM VK_FORMAT_X8_D24_UNORM_PACK32 // AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT VK_FORMAT_D24_UNORM_S8_UINT // AHARDWAREBUFFER_FORMAT_D32_FLOAT VK_FORMAT_D32_SFLOAT // AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT VK_FORMAT_D32_SFLOAT_S8_UINT // AHARDWAREBUFFER_FORMAT_S8_UINT VK_FORMAT_S8_UINT // The AHARDWAREBUFFER_FORMAT_* are an enum in the NDK headers, but get passed in to Vulkan // as uint32_t. Casting the enums here avoids scattering casts around in the code. std::map<uint32_t, VkFormat> ahb_format_map_a2v = { { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM, VK_FORMAT_R8G8B8A8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM, VK_FORMAT_R8G8B8_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM, VK_FORMAT_R5G6B5_UNORM_PACK16 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT, VK_FORMAT_R16G16B16A16_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM, VK_FORMAT_A2B10G10R10_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D16_UNORM, VK_FORMAT_D16_UNORM }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM, VK_FORMAT_X8_D24_UNORM_PACK32 }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT, VK_FORMAT_D32_SFLOAT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_D32_FLOAT_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT }, { (uint32_t)AHARDWAREBUFFER_FORMAT_S8_UINT, VK_FORMAT_S8_UINT } }; // AHardwareBuffer Usage Vulkan Usage or Creation Flag (Intermixed - Aargh!) 
// ===================== =================================================== // None VK_IMAGE_USAGE_TRANSFER_SRC_BIT // None VK_IMAGE_USAGE_TRANSFER_DST_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_SAMPLED_BIT // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT // AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE None // AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT VK_IMAGE_CREATE_PROTECTED_BIT // None VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT // None VK_IMAGE_CREATE_EXTENDED_USAGE_BIT // Same casting rationale. De-mixing the table to prevent type confusion and aliasing std::map<uint64_t, VkImageUsageFlags> ahb_usage_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE, (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<uint64_t, VkImageCreateFlags> ahb_create_map_a2v = { { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT, VK_IMAGE_CREATE_PROTECTED_BIT }, { (uint64_t)AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, 0 }, // No equivalent }; std::map<VkImageUsageFlags, uint64_t> ahb_usage_map_v2a = { { VK_IMAGE_USAGE_SAMPLED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE }, { VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER }, { VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER }, }; std::map<VkImageCreateFlags, uint64_t> ahb_create_map_v2a = { { VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP }, { VK_IMAGE_CREATE_PROTECTED_BIT, (uint64_t)AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT }, }; // clang-format on // // AHB-extension new APIs // bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID( VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const { bool skip = false; // buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags. 
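// Illustrative (hypothetical values, not from the spec): an AHardwareBuffer allocated with usage AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE passes the check below, while a CPU-only buffer (say, only AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN) fails the 01884 check.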
AHardwareBuffer_Desc ahb_desc; AHardwareBuffer_describe(buffer, &ahb_desc); uint32_t required_flags = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER; if (0 == (ahb_desc.usage & required_flags)) { skip |= LogError(device, "VUID-vkGetAndroidHardwareBufferPropertiesANDROID-buffer-01884", "vkGetAndroidHardwareBufferPropertiesANDROID: The AHardwareBuffer's AHardwareBuffer_Desc.usage (0x%" PRIx64 ") does not have any AHARDWAREBUFFER_USAGE_GPU_* flags set.", ahb_desc.usage); } return skip; } bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo, struct AHardwareBuffer **pBuffer) const { bool skip = false; const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory); // VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID must have been included in // VkExportMemoryAllocateInfo::handleTypes when memory was created. if (!mem_info->IsExport() || (0 == (mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID))) { skip |= LogError(device, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-handleTypes-01882", "vkGetMemoryAndroidHardwareBufferANDROID: %s was not allocated for export, or the " "export handleTypes (0x%" PRIx32 ") did not contain VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.", report_data->FormatHandle(pInfo->memory).c_str(), mem_info->export_handle_type_flags); } // If the pNext chain of the VkMemoryAllocateInfo used to allocate memory included a VkMemoryDedicatedAllocateInfo // with non-NULL image member, then that image must already be bound to memory. 
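// Sketch of the expected export flow (assumed usage, not prescribed here): vkCreateImage, then vkAllocateMemory with chained VkExportMemoryAllocateInfo and VkMemoryDedicatedAllocateInfo{ .image = image }, then vkBindImageMemory, and only then vkGetMemoryAndroidHardwareBufferANDROID; omitting the bind step is what the 01883 check below reports.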
if (mem_info->IsDedicatedImage()) { const auto image_state = GetImageState(mem_info->dedicated->handle.Cast<VkImage>()); if ((nullptr == image_state) || (0 == (image_state->GetBoundMemory().count(mem_info->mem())))) { LogObjectList objlist(device); objlist.add(pInfo->memory); objlist.add(mem_info->dedicated->handle); skip |= LogError(objlist, "VUID-VkMemoryGetAndroidHardwareBufferInfoANDROID-pNext-01883", "vkGetMemoryAndroidHardwareBufferANDROID: %s was allocated using a dedicated " "%s, but that image is not bound to the VkDeviceMemory object.", report_data->FormatHandle(pInfo->memory).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str()); } } return skip; } // // AHB-specific validation within non-AHB APIs // bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { bool skip = false; auto import_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext); auto exp_mem_alloc_info = LvlFindInChain<VkExportMemoryAllocateInfo>(alloc_info->pNext); auto mem_ded_alloc_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(alloc_info->pNext); if ((import_ahb_info) && (NULL != import_ahb_info->buffer)) { // This is an import with handleType of VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID AHardwareBuffer_Desc ahb_desc = {}; AHardwareBuffer_describe(import_ahb_info->buffer, &ahb_desc); // Validate AHardwareBuffer_Desc::usage is a valid usage for imported AHB // // BLOB & GPU_DATA_BUFFER combo specifically allowed if ((AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { // Otherwise, must be a combination from the AHardwareBuffer Format and Usage Equivalence tables // Usage must have at least one bit from the table. It may have additional bits not in the table uint64_t ahb_equiv_usage_bits = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE | AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP | AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE | AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT; if (0 == (ahb_desc.usage & ahb_equiv_usage_bits)) { skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01881", "vkAllocateMemory: The AHardwareBuffer_Desc's usage (0x%" PRIx64 ") is not compatible with Vulkan.", ahb_desc.usage); } } // Collect external buffer info auto pdebi = LvlInitStruct<VkPhysicalDeviceExternalBufferInfo>(); pdebi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) { pdebi.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER]; } auto ext_buf_props = LvlInitStruct<VkExternalBufferProperties>(); DispatchGetPhysicalDeviceExternalBufferProperties(physical_device, &pdebi, &ext_buf_props); // If buffer is not NULL, Android hardware buffers must be supported for import, as reported by // VkExternalImageFormatProperties or VkExternalBufferProperties. 
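// If the buffer-level query did not report VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT, fall back to an image-format query synthesized from the AHardwareBuffer description before concluding that 01880 applies.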
if (0 == (ext_buf_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT)) { // Collect external format info auto pdeifi = LvlInitStruct<VkPhysicalDeviceExternalImageFormatInfo>(); pdeifi.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID; auto pdifi2 = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&pdeifi); if (0 < ahb_format_map_a2v.count(ahb_desc.format)) pdifi2.format = ahb_format_map_a2v[ahb_desc.format]; pdifi2.type = VK_IMAGE_TYPE_2D; // Seems likely pdifi2.tiling = VK_IMAGE_TILING_OPTIMAL; // Ditto if (AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE]; } if (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER & ahb_desc.usage) { pdifi2.usage |= ahb_usage_map_a2v[AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER]; } if (AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_GPU_CUBE_MAP]; } if (AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT & ahb_desc.usage) { pdifi2.flags |= ahb_create_map_a2v[AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT]; } auto ext_img_fmt_props = LvlInitStruct<VkExternalImageFormatProperties>(); auto ifp2 = LvlInitStruct<VkImageFormatProperties2>(&ext_img_fmt_props); VkResult fmt_lookup_result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &pdifi2, &ifp2); if ((VK_SUCCESS != fmt_lookup_result) || (0 == (ext_img_fmt_props.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT))) { skip |= LogError(device, "VUID-VkImportAndroidHardwareBufferInfoANDROID-buffer-01880", "vkAllocateMemory: Neither the VkExternalImageFormatProperties nor the VkExternalBufferProperties " "structs for the AHardwareBuffer include the VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT flag."); } } // Retrieve buffer and format properties of the provided AHardwareBuffer auto ahb_format_props = LvlInitStruct<VkAndroidHardwareBufferFormatPropertiesANDROID>(); auto ahb_props = LvlInitStruct<VkAndroidHardwareBufferPropertiesANDROID>(&ahb_format_props); DispatchGetAndroidHardwareBufferPropertiesANDROID(device, import_ahb_info->buffer, &ahb_props); // allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer if (alloc_info->allocationSize != ahb_props.allocationSize) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-02383", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, allocationSize (%" PRId64 ") does not match the AHardwareBuffer's reported allocationSize (%" PRId64 ").", alloc_info->allocationSize, ahb_props.allocationSize); } // memoryTypeIndex must be one of those returned by vkGetAndroidHardwareBufferPropertiesANDROID for the AHardwareBuffer // Note: memoryTypeIndex is an index, memoryTypeBits is a bitmask uint32_t mem_type_bitmask = 1 << alloc_info->memoryTypeIndex; if (0 == (mem_type_bitmask & ahb_props.memoryTypeBits)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-02385", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct, memoryTypeIndex (%" PRId32 ") does not correspond to a bit set in AHardwareBuffer's reported " "memoryTypeBits bitmask (0x%" PRIx32 ").", alloc_info->memoryTypeIndex, ahb_props.memoryTypeBits); } // Checks for allocations without a dedicated allocation requirement if ((nullptr == mem_ded_alloc_info) || (VK_NULL_HANDLE == 
mem_ded_alloc_info->image)) { // the Android hardware buffer must have a format of AHARDWAREBUFFER_FORMAT_BLOB and a usage that includes // AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER if (((uint64_t)AHARDWAREBUFFER_FORMAT_BLOB != ahb_desc.format) || (0 == (ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER))) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-02384", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID " "struct without a dedicated allocation requirement, while the AHardwareBuffer_Desc's format (%u) is not " "AHARDWAREBUFFER_FORMAT_BLOB or usage (0x%" PRIx64 ") does not include AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER.", ahb_desc.format, ahb_desc.usage); } } else { // Checks specific to import with a dedicated allocation requirement const VkImageCreateInfo *ici = &(GetImageState(mem_ded_alloc_info->image)->createInfo); // The Android hardware buffer's usage must include at least one of AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER or // AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE if (0 == (ahb_desc.usage & (AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER | AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE))) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-02386", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID and a " "dedicated allocation requirement, while the AHardwareBuffer's usage (0x%" PRIx64 ") contains neither AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER nor AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE.", ahb_desc.usage); } // the format of image must be VK_FORMAT_UNDEFINED or the format returned by // vkGetAndroidHardwareBufferPropertiesANDROID if ((ici->format != ahb_format_props.format) && (VK_FORMAT_UNDEFINED != ici->format)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02387", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's " "format (%s) is not VK_FORMAT_UNDEFINED and does not match the AHardwareBuffer's format (%s).", string_VkFormat(ici->format), string_VkFormat(ahb_format_props.format)); } // The width, height, and array layer dimensions of image and the Android hardware buffer must be identical if ((ici->extent.width != ahb_desc.width) || (ici->extent.height != ahb_desc.height) || (ici->arrayLayers != ahb_desc.layers)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02388", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained " "VkImportAndroidHardwareBufferInfoANDROID, the dedicated allocation image's " "width, height, and arrayLayers (%" PRId32 " %" PRId32 " %" PRId32 ") do not match those of the AHardwareBuffer (%" PRId32 " %" PRId32 " %" PRId32 ").", ici->extent.width, ici->extent.height, ici->arrayLayers, ahb_desc.width, ahb_desc.height, ahb_desc.layers); } // If the Android hardware buffer's usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE, the image must // have either a full mipmap chain or exactly 1 mip level. // // NOTE! The language of this VUID contradicts the language in the spec (1.1.93), which says "The // AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE flag does not correspond to a Vulkan image usage or creation flag. Instead, // its presence indicates that the Android hardware buffer contains a complete mipmap chain, and its absence indicates // that the Android hardware buffer contains only a single mip level." // // TODO: This code implements the VUID's meaning, but it seems likely that the spec text is actually correct. // Clarification requested.
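// Worked example (illustrative): for a 64x64 dedicated image, FullMipChainLevels(ici->extent) is 7 (64, 32, 16, 8, 4, 2, 1), so with AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE set, ici->mipLevels must be either 1 or 7 to avoid the 02389 check below.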
if ((ahb_desc.usage & AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE) && (ici->mipLevels != 1) && (ici->mipLevels != FullMipChainLevels(ici->extent))) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02389", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "usage includes AHARDWAREBUFFER_USAGE_GPU_MIPMAP_COMPLETE but mipLevels (%" PRId32 ") is neither 1 nor full mip " "chain levels (%" PRId32 ").", ici->mipLevels, FullMipChainLevels(ici->extent)); } // each bit set in the usage of image must be listed in AHardwareBuffer Usage Equivalence, and if there is a // corresponding AHARDWAREBUFFER_USAGE bit listed that bit must be included in the Android hardware buffer's // AHardwareBuffer_Desc::usage if (ici->usage & ~(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "dedicated image usage bits (0x%" PRIx32 ") include a usage not listed in the AHardwareBuffer Usage Equivalence table.", ici->usage); } std::vector<VkImageUsageFlags> usages = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT}; for (VkImageUsageFlags ubit : usages) { if (ici->usage & ubit) { uint64_t ahb_usage = ahb_usage_map_v2a[ubit]; if (0 == (ahb_usage & ahb_desc.usage)) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-02390", "vkAllocateMemory: VkMemoryAllocateInfo struct with chained VkImportAndroidHardwareBufferInfoANDROID, " "the dedicated image usage bit %s equivalent is not in AHardwareBuffer_Desc.usage (0x%" PRIx64 ") ", string_VkImageUsageFlags(ubit).c_str(), ahb_desc.usage); } } } } } else { // Not an import if ((exp_mem_alloc_info) && (mem_ded_alloc_info) && (0 != (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID & exp_mem_alloc_info->handleTypes)) && (VK_NULL_HANDLE != mem_ded_alloc_info->image)) { // This is an Android HW Buffer export if (0 != alloc_info->allocationSize) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain indicates a dedicated Android Hardware Buffer export allocation, " "but allocationSize is non-zero."); } } else { if (0 == alloc_info->allocationSize) { skip |= LogError( device, "VUID-VkMemoryAllocateInfo-pNext-01874", "vkAllocateMemory: pNext chain does not indicate a dedicated export allocation, but allocationSize is 0."); } } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { bool skip = false; const IMAGE_STATE *image_state = GetImageState(image); if (image_state != nullptr) { if (image_state->IsExternalAHB() && (0 == image_state->GetBoundMemory().size())) { const char *vuid = strcmp(func_name, "vkGetImageMemoryRequirements()") == 0 ? "VUID-vkGetImageMemoryRequirements-image-04004" : "VUID-VkImageMemoryRequirementsInfo2-image-01897";
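// An image created with the AHB external handle type takes its real allocation parameters from the imported buffer, so its memory requirements are only meaningful once memory is bound; the error below reports the unbound case under whichever VUID matches the calling entry point.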
"VUID-vkGetImageMemoryRequirements-image-04004" : "VUID-VkImageMemoryRequirementsInfo2-image-01897"; skip |= LogError(image, vuid, "%s: Attempt get image memory requirements for an image created with a " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType, which has not yet been " "bound to memory.", func_name); } } return skip; } bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID( const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const { bool skip = false; const VkAndroidHardwareBufferUsageANDROID *ahb_usage = LvlFindInChain<VkAndroidHardwareBufferUsageANDROID>(pImageFormatProperties->pNext); if (nullptr != ahb_usage) { const VkPhysicalDeviceExternalImageFormatInfo *pdeifi = LvlFindInChain<VkPhysicalDeviceExternalImageFormatInfo>(pImageFormatInfo->pNext); if ((nullptr == pdeifi) || (VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID != pdeifi->handleType)) { skip |= LogError(device, "VUID-vkGetPhysicalDeviceImageFormatProperties2-pNext-01868", "vkGetPhysicalDeviceImageFormatProperties2: pImageFormatProperties includes a chained " "VkAndroidHardwareBufferUsageANDROID struct, but pImageFormatInfo does not include a chained " "VkPhysicalDeviceExternalImageFormatInfo struct with handleType " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID."); } } return skip; } bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkBuffer buffer) const { bool skip = false; if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) { const char *vuid = (strcmp(func_name, "vkBindBufferMemory()") == 0) ? "VUID-vkBindBufferMemory-memory-02986" : "VUID-VkBindBufferMemoryInfo-memory-02986"; LogObjectList objlist(buffer); objlist.add(memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkBuffer (%s) " "VkExternalMemoryBufferreateInfo::handleType (%s)", func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(handleType).c_str()); } return skip; } bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkImage image) const { bool skip = false; if ((handleType & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0) { const char *vuid = (strcmp(func_name, "vkBindImageMemory()") == 0) ? 
"VUID-vkBindImageMemory-memory-02990" : "VUID-VkBindImageMemoryInfo-memory-02990"; LogObjectList objlist(image); objlist.add(memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an AHB import operation which is not set " "VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID in the VkImage (%s) " "VkExternalMemoryImageCreateInfo::handleType (%s)", func_name, report_data->FormatHandle(memory).c_str(), report_data->FormatHandle(image).c_str(), string_VkExternalMemoryHandleTypeFlags(handleType).c_str()); } return skip; } #else // !AHB_VALIDATION_SUPPORT // Case building for Android without AHB Validation #ifdef VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::PreCallValidateGetAndroidHardwareBufferPropertiesANDROID( VkDevice device, const struct AHardwareBuffer *buffer, VkAndroidHardwareBufferPropertiesANDROID *pProperties) const { return false; } bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBufferANDROID(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo, struct AHardwareBuffer **pBuffer) const { return false; } #endif // VK_USE_PLATFORM_ANDROID_KHR bool CoreChecks::ValidateAllocateMemoryANDROID(const VkMemoryAllocateInfo *alloc_info) const { return false; } bool CoreChecks::ValidateGetPhysicalDeviceImageFormatProperties2ANDROID( const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, const VkImageFormatProperties2 *pImageFormatProperties) const { return false; } bool CoreChecks::ValidateGetImageMemoryRequirementsANDROID(const VkImage image, const char *func_name) const { return false; } bool CoreChecks::ValidateBufferImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkBuffer buffer) const { return false; } bool CoreChecks::ValidateImageImportedHandleANDROID(const char *func_name, VkExternalMemoryHandleTypeFlags handleType, VkDeviceMemory memory, VkImage image) const { return false; } #endif // AHB_VALIDATION_SUPPORT bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo, const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) const { bool skip = false; if (memObjMap.size() >= phys_dev_props.limits.maxMemoryAllocationCount) { skip |= LogError(device, "VUID-vkAllocateMemory-maxMemoryAllocationCount-04101", "vkAllocateMemory: Number of currently valid memory objects is not less than the maximum allowed (%u).", phys_dev_props.limits.maxMemoryAllocationCount); } if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateAllocateMemoryANDROID(pAllocateInfo); } else { if (0 == pAllocateInfo->allocationSize) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-allocationSize-00638", "vkAllocateMemory: allocationSize is 0."); }; } auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(pAllocateInfo->pNext); if (chained_flags_struct && chained_flags_struct->flags == VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00675"); skip |= ValidateDeviceMaskToZero(chained_flags_struct->deviceMask, device, "VUID-VkMemoryAllocateFlagsInfo-deviceMask-00676"); } if (pAllocateInfo->memoryTypeIndex >= phys_dev_mem_props.memoryTypeCount) { skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01714", "vkAllocateMemory: attempting to allocate memory type %u, which is not a valid index. 
"Device only advertises %u memory types.", pAllocateInfo->memoryTypeIndex, phys_dev_mem_props.memoryTypeCount); } else { const VkMemoryType memory_type = phys_dev_mem_props.memoryTypes[pAllocateInfo->memoryTypeIndex]; if (pAllocateInfo->allocationSize > phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size) { skip |= LogError(device, "VUID-vkAllocateMemory-pAllocateInfo-01713", "vkAllocateMemory: attempting to allocate %" PRIu64 " bytes from heap %u, " "but size of that heap is only %" PRIu64 " bytes.", pAllocateInfo->allocationSize, memory_type.heapIndex, phys_dev_mem_props.memoryHeaps[memory_type.heapIndex].size); } if (!enabled_features.device_coherent_memory_features.deviceCoherentMemory && ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD) != 0)) { skip |= LogError(device, "VUID-vkAllocateMemory-deviceCoherentMemory-02790", "vkAllocateMemory: attempting to allocate memory type %u, which includes the " "VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD memory property, but the deviceCoherentMemory feature " "is not enabled.", pAllocateInfo->memoryTypeIndex); } if ((enabled_features.core11.protectedMemory == VK_FALSE) && ((memory_type.propertyFlags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0)) { skip |= LogError(device, "VUID-VkMemoryAllocateInfo-memoryTypeIndex-01872", "vkAllocateMemory(): attempting to allocate memory type %u, which includes the " "VK_MEMORY_PROPERTY_PROTECTED_BIT memory property, but the protectedMemory feature " "is not enabled.", pAllocateInfo->memoryTypeIndex); } } bool imported_ahb = false; #ifdef AHB_VALIDATION_SUPPORT // "memory is not an imported Android Hardware Buffer" refers to VkImportAndroidHardwareBufferInfoANDROID with a non-NULL // buffer value. Imported memory has a separate VUID that checks that size and allocationSize match up auto imported_ahb_info = LvlFindInChain<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo->pNext); if (imported_ahb_info != nullptr) { imported_ahb = imported_ahb_info->buffer != nullptr; } #endif // AHB_VALIDATION_SUPPORT auto dedicated_allocate_info = LvlFindInChain<VkMemoryDedicatedAllocateInfo>(pAllocateInfo->pNext); if (dedicated_allocate_info) { if ((dedicated_allocate_info->buffer != VK_NULL_HANDLE) && (dedicated_allocate_info->image != VK_NULL_HANDLE)) { skip |= LogError(device, "VUID-VkMemoryDedicatedAllocateInfo-image-01432", "vkAllocateMemory: Either buffer or image has to be VK_NULL_HANDLE in VkMemoryDedicatedAllocateInfo"); } else if (dedicated_allocate_info->image != VK_NULL_HANDLE) { // Dedicated VkImage const IMAGE_STATE *image_state = GetImageState(dedicated_allocate_info->image); if (image_state->disjoint == true) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01797", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_DISJOINT_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } else { if ((pAllocateInfo->allocationSize != image_state->requirements[0].size) && (imported_ahb == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ?
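// Illustrative sketch (application-side usage, not validation logic; handles are placeholders):
// a conforming dedicated allocation takes its size straight from the image's memory requirements, e.g.
//   VkMemoryRequirements reqs;
//   vkGetImageMemoryRequirements(device, dedicated_image, &reqs);
//   VkMemoryDedicatedAllocateInfo dedicated{VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO};
//   dedicated.image = dedicated_image;
//   VkMemoryAllocateInfo alloc{VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, &dedicated};
//   alloc.allocationSize = reqs.size;  // any other size trips the VUID below, unless the memory is an imported AHB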
"VUID-VkMemoryDedicatedAllocateInfo-image-02964" : "VUID-VkMemoryDedicatedAllocateInfo-image-01433"; skip |= LogError(device, vuid, "vkAllocateMemory: Allocation Size (%" PRIu64 ") needs to be equal to VkImage %s VkMemoryRequirements::size (%" PRIu64 ")", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->image).c_str(), image_state->requirements[0].size); } if ((image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-image-01434", "vkAllocateMemory: VkImage %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_IMAGE_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->image).c_str()); } } } else if (dedicated_allocate_info->buffer != VK_NULL_HANDLE) { // Dedicated VkBuffer const BUFFER_STATE *buffer_state = GetBufferState(dedicated_allocate_info->buffer); if ((pAllocateInfo->allocationSize != buffer_state->requirements.size) && (imported_ahb == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ? "VUID-VkMemoryDedicatedAllocateInfo-buffer-02965" : "VUID-VkMemoryDedicatedAllocateInfo-buffer-01435"; skip |= LogError( device, vuid, "vkAllocateMemory: Allocation Size (%" PRIu64 ") needs to be equal to VkBuffer %s VkMemoryRequirements::size (%" PRIu64 ")", pAllocateInfo->allocationSize, report_data->FormatHandle(dedicated_allocate_info->buffer).c_str(), buffer_state->requirements.size); } if ((buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) != 0) { skip |= LogError( device, "VUID-VkMemoryDedicatedAllocateInfo-buffer-01436", "vkAllocateMemory: VkBuffer %s can't be used in VkMemoryDedicatedAllocateInfo because it was created with " "VK_BUFFER_CREATE_SPARSE_BINDING_BIT", report_data->FormatHandle(dedicated_allocate_info->buffer).c_str()); } } } // TODO: VUIDs ending in 00643, 00644, 00646, 00647, 01742, 01743, 01745, 00645, 00648, 01744 return skip; } // For given obj node, if it is use, flag a validation error and return callback result, else return false bool CoreChecks::ValidateObjectNotInUse(const BASE_NODE *obj_node, const char *caller_name, const char *error_code) const { if (disabled[object_in_use]) return false; auto obj_struct = obj_node->Handle(); bool skip = false; if (obj_node->InUse()) { skip |= LogError(device, error_code, "Cannot call %s on %s that is currently in use by a command buffer.", caller_name, report_data->FormatHandle(obj_struct).c_str()); } return skip; } bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) const { const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); bool skip = false; if (mem_info) { skip |= ValidateObjectNotInUse(mem_info, "vkFreeMemory", "VUID-vkFreeMemory-memory-00677"); } return skip; } // Validate that given Map memory range is valid. This means that the memory should not already be mapped, // and that the size of the map range should be: // 1. Not zero // 2. 
bool CoreChecks::ValidateMapMemRange(const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize offset, VkDeviceSize size) const { bool skip = false; assert(mem_info); const auto mem = mem_info->mem(); if (size == 0) { skip |= LogError(mem, "VUID-vkMapMemory-size-00680", "VkMapMemory: Attempting to map memory range of size zero"); } // It is an application error to call VkMapMemory on an object that is already mapped if (mem_info->mapped_range.size != 0) { skip |= LogError(mem, "VUID-vkMapMemory-memory-00678", "VkMapMemory: Attempting to map memory on an already-mapped %s.", report_data->FormatHandle(mem).c_str()); } // Validate offset is not over allocation size if (offset >= mem_info->alloc_info.allocationSize) { skip |= LogError(mem, "VUID-vkMapMemory-offset-00679", "VkMapMemory: Attempting to map memory with an offset of 0x%" PRIx64 " which is larger than the allocation size 0x%" PRIx64, offset, mem_info->alloc_info.allocationSize); } // Validate that offset + size is within object's allocationSize if (size != VK_WHOLE_SIZE) { if ((offset + size) > mem_info->alloc_info.allocationSize) { skip |= LogError(mem, "VUID-vkMapMemory-size-00681", "VkMapMemory: Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps allocation size 0x%" PRIx64 ".", offset, size + offset, mem_info->alloc_info.allocationSize); } } return skip; } bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout) const { // Verify fence status of submitted fences bool skip = false; for (uint32_t i = 0; i < fenceCount; i++) { skip |= VerifyQueueStateToFence(pFences[i]); } return skip; } bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) const { bool skip = false; skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", "VUID-vkGetDeviceQueue-queueFamilyIndex-00384"); for (size_t i = 0; i < device_queue_info_list.size(); i++) { const auto device_queue_info = device_queue_info_list.at(i); if (device_queue_info.queue_family_index != queueFamilyIndex) { continue; } // flag must be zero if (device_queue_info.flags != 0) { skip |= LogError( device, "VUID-vkGetDeviceQueue-flags-01841", "vkGetDeviceQueue: queueIndex (=%" PRIu32 ") was created with a non-zero VkDeviceQueueCreateFlags in vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32 "]. Need to use vkGetDeviceQueue2 instead.", queueIndex, device_queue_info.index); } if (device_queue_info.queue_count <= queueIndex) { skip |= LogError(device, "VUID-vkGetDeviceQueue-queueIndex-00385", "vkGetDeviceQueue: queueIndex (=%" PRIu32 ") is not less than the number of queues requested from queueFamilyIndex (=%" PRIu32 ") when the device was created via vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32 "] (i.e. "
"is not less than %" PRIu32 ").", queueIndex, queueFamilyIndex, device_queue_info.index, device_queue_info.queue_count); } } return skip; } bool CoreChecks::PreCallValidateGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) const { bool skip = false; if (pQueueInfo) { const uint32_t queueFamilyIndex = pQueueInfo->queueFamilyIndex; const uint32_t queueIndex = pQueueInfo->queueIndex; const VkDeviceQueueCreateFlags flags = pQueueInfo->flags; skip |= ValidateDeviceQueueFamily(queueFamilyIndex, "vkGetDeviceQueue2", "pQueueInfo->queueFamilyIndex", "VUID-VkDeviceQueueInfo2-queueFamilyIndex-01842"); // ValidateDeviceQueueFamily() already checks that queueFamilyIndex is valid, but we still need to make sure the flags match it bool valid_flags = false; for (size_t i = 0; i < device_queue_info_list.size(); i++) { const auto device_queue_info = device_queue_info_list.at(i); // vkGetDeviceQueue2 only checks if both family index AND flags are same as device creation // this handles the case where the same queueFamilyIndex is used with/without the protected flag if ((device_queue_info.queue_family_index != queueFamilyIndex) || (device_queue_info.flags != flags)) { continue; } valid_flags = true; if (device_queue_info.queue_count <= queueIndex) { skip |= LogError( device, "VUID-VkDeviceQueueInfo2-queueIndex-01843", "vkGetDeviceQueue2: queueIndex (=%" PRIu32 ") is not less than the number of queues requested from [queueFamilyIndex (=%" PRIu32 "), flags (%s)] combination when the device was created via vkCreateDevice::pCreateInfo->pQueueCreateInfos[%" PRIu32 "] (i.e. is not less than %" PRIu32 ").", queueIndex, queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str(), device_queue_info.index, device_queue_info.queue_count); } } // Don't double error message if already skipping from ValidateDeviceQueueFamily if (!valid_flags && !skip) { skip |= LogError(device, "UNASSIGNED-VkDeviceQueueInfo2", "vkGetDeviceQueue2: The combination of queueFamilyIndex (=%" PRIu32 ") and flags (%s) were never both set together in any element of " "vkCreateDevice::pCreateInfo->pQueueCreateInfos at device creation time.", queueFamilyIndex, string_VkDeviceQueueCreateFlags(flags).c_str()); } } return skip; } bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) const { const QUEUE_STATE *queue_state = GetQueueState(queue); return VerifyQueueStateToSeq(queue_state, queue_state->seq + queue_state->submissions.size()); } bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) const { bool skip = false; const auto &const_queue_map = queueMap; for (auto &queue : const_queue_map) { skip |= VerifyQueueStateToSeq(&queue.second, queue.second.seq + queue.second.submissions.size()); } return skip; } bool CoreChecks::PreCallValidateCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) const { bool skip = false; auto *sem_type_create_info = LvlFindInChain<VkSemaphoreTypeCreateInfo>(pCreateInfo->pNext); if (sem_type_create_info) { if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_TIMELINE && !enabled_features.core12.timelineSemaphore) { skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-timelineSemaphore-03252", "vkCreateSemaphore: timelineSemaphore feature is not enabled, cannot create timeline semaphores"); } if (sem_type_create_info->semaphoreType == VK_SEMAPHORE_TYPE_BINARY && sem_type_create_info->initialValue != 0) { skip |= LogError(device, "VUID-VkSemaphoreTypeCreateInfo-semaphoreType-03279",
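// Illustrative sketch (application side; assumes the Vulkan 1.2 timelineSemaphore feature is enabled):
// a timeline semaphore chains VkSemaphoreTypeCreateInfo through VkSemaphoreCreateInfo::pNext, e.g.
//   VkSemaphoreTypeCreateInfo type_ci{VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO, nullptr, VK_SEMAPHORE_TYPE_TIMELINE, 0};
//   VkSemaphoreCreateInfo ci{VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &type_ci, 0};
// For VK_SEMAPHORE_TYPE_BINARY the initialValue member must stay 0, which is what the error below reports.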
"vkCreateSemaphore: if semaphoreType is VK_SEMAPHORE_TYPE_BINARY, initialValue must be zero"); } } return skip; } bool CoreChecks::PreCallValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const { return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphores"); } bool CoreChecks::PreCallValidateWaitSemaphoresKHR(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout) const { return ValidateWaitSemaphores(device, pWaitInfo, timeout, "VkWaitSemaphoresKHR"); } bool CoreChecks::ValidateWaitSemaphores(VkDevice device, const VkSemaphoreWaitInfo *pWaitInfo, uint64_t timeout, const char *apiName) const { bool skip = false; for (uint32_t i = 0; i < pWaitInfo->semaphoreCount; i++) { auto *semaphore_state = GetSemaphoreState(pWaitInfo->pSemaphores[i]); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(pWaitInfo->pSemaphores[i], "VUID-VkSemaphoreWaitInfo-pSemaphores-03256", "%s(): all semaphores in pWaitInfo must be timeline semaphores, but %s is not", apiName, report_data->FormatHandle(pWaitInfo->pSemaphores[i]).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) const { const FENCE_STATE *fence_node = GetFenceState(fence); bool skip = false; if (fence_node) { if (fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) { skip |= LogError(fence, "VUID-vkDestroyFence-fence-01120", "%s is in use.", report_data->FormatHandle(fence).c_str()); } } return skip; } bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) const { const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore); bool skip = false; if (sema_node) { skip |= ValidateObjectNotInUse(sema_node, "vkDestroySemaphore", "VUID-vkDestroySemaphore-semaphore-01137"); } return skip; } bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) const { const EVENT_STATE *event_state = GetEventState(event); bool skip = false; if (event_state) { skip |= ValidateObjectNotInUse(event_state, "vkDestroyEvent", "VUID-vkDestroyEvent-event-01145"); } return skip; } bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) const { if (disabled[query_validation]) return false; const QUERY_POOL_STATE *qp_state = GetQueryPoolState(queryPool); bool skip = false; if (qp_state) { skip |= ValidateObjectNotInUse(qp_state, "vkDestroyQueryPool", "VUID-vkDestroyQueryPool-queryPool-00793"); } return skip; } bool CoreChecks::ValidatePerformanceQueryResults(const char *cmd_name, const QUERY_POOL_STATE *query_pool_state, uint32_t firstQuery, uint32_t queryCount, VkQueryResultFlags flags) const { bool skip = false; if (flags & (VK_QUERY_RESULT_WITH_AVAILABILITY_BIT | VK_QUERY_RESULT_PARTIAL_BIT | VK_QUERY_RESULT_64_BIT)) { string invalid_flags_string; for (auto flag : {VK_QUERY_RESULT_WITH_AVAILABILITY_BIT, VK_QUERY_RESULT_PARTIAL_BIT, VK_QUERY_RESULT_64_BIT}) { if (flag & flags) { if (invalid_flags_string.size()) { invalid_flags_string += " and "; } invalid_flags_string += string_VkQueryResultFlagBits(flag); } } skip |= LogError(query_pool_state->pool(), strcmp(cmd_name, "vkGetQueryPoolResults") == 0 ? 
"VUID-vkGetQueryPoolResults-queryType-03230" : "VUID-vkCmdCopyQueryPoolResults-queryType-03233", "%s: QueryPool %s was created with a queryType of" "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but flags contains %s.", cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(), invalid_flags_string.c_str()); } for (uint32_t query_index = firstQuery; query_index < queryCount; query_index++) { uint32_t submitted = 0; for (uint32_t pass_index = 0; pass_index < query_pool_state->n_performance_passes; pass_index++) { QueryObject obj(QueryObject(query_pool_state->pool(), query_index), pass_index); auto query_pass_iter = queryToStateMap.find(obj); if (query_pass_iter != queryToStateMap.end() && query_pass_iter->second == QUERYSTATE_AVAILABLE) submitted++; } if (submitted < query_pool_state->n_performance_passes) { skip |= LogError(query_pool_state->pool(), "VUID-vkGetQueryPoolResults-queryType-03231", "%s: QueryPool %s has %u performance query passes, but the query has only been " "submitted for %u of the passes.", cmd_name, report_data->FormatHandle(query_pool_state->pool()).c_str(), query_pool_state->n_performance_passes, submitted); } } return skip; } bool CoreChecks::ValidateGetQueryPoolPerformanceResults(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, void *pData, VkDeviceSize stride, VkQueryResultFlags flags, const char *apiName) const { bool skip = false; const auto query_pool_state = GetQueryPoolState(queryPool); if (!query_pool_state || query_pool_state->createInfo.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return skip; if (((((uintptr_t)pData) % sizeof(VkPerformanceCounterResultKHR)) != 0 || (stride % sizeof(VkPerformanceCounterResultKHR)) != 0)) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-03229", "%s(): QueryPool %s was created with a queryType of " "VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR but pData & stride are not multiple of the " "size of VkPerformanceCounterResultKHR.", apiName, report_data->FormatHandle(queryPool).c_str()); } skip |= ValidatePerformanceQueryResults(apiName, query_pool_state, firstQuery, queryCount, flags); return skip; } bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) const { if (disabled[query_validation]) return false; bool skip = false; skip |= ValidateQueryPoolStride("VUID-vkGetQueryPoolResults-flags-02827", "VUID-vkGetQueryPoolResults-flags-00815", stride, "dataSize", dataSize, flags); skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkGetQueryPoolResults()", "VUID-vkGetQueryPoolResults-firstQuery-00813", "VUID-vkGetQueryPoolResults-firstQuery-00816"); skip |= ValidateGetQueryPoolPerformanceResults(queryPool, firstQuery, queryCount, pData, stride, flags, "vkGetQueryPoolResults"); const auto query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && (flags & VK_QUERY_RESULT_PARTIAL_BIT)) { skip |= LogError( queryPool, "VUID-vkGetQueryPoolResults-queryType-00818", "%s was created with a queryType of VK_QUERY_TYPE_TIMESTAMP but flags contains VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (!skip) { uint32_t query_avail_data = (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) ? 1 : 0; uint32_t query_size_in_bytes = (flags & VK_QUERY_RESULT_64_BIT) ? 
sizeof(uint64_t) : sizeof(uint32_t); uint32_t query_items = 0; uint32_t query_size = 0; switch (query_pool_state->createInfo.queryType) { case VK_QUERY_TYPE_OCCLUSION: // Occlusion queries write one integer value - the number of samples passed. query_items = 1; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_PIPELINE_STATISTICS: // Pipeline statistics queries write one integer value for each bit that is enabled in the pipelineStatistics // when the pool is created { const int num_bits = sizeof(VkFlags) * CHAR_BIT; std::bitset<num_bits> pipe_stats_bits(query_pool_state->createInfo.pipelineStatistics); query_items = static_cast<uint32_t>(pipe_stats_bits.count()); query_size = query_size_in_bytes * (query_items + query_avail_data); } break; case VK_QUERY_TYPE_TIMESTAMP: // Timestamp queries write one integer query_items = 1; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT: // Transform feedback queries write two integers query_items = 2; query_size = query_size_in_bytes * (query_items + query_avail_data); break; case VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR: // Performance queries store results in a tightly packed array of VkPerformanceCounterResultKHR query_items = query_pool_state->perf_counter_index_count; query_size = sizeof(VkPerformanceCounterResultKHR) * query_items; if (query_size > stride) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-queryType-04519", "vkGetQueryPoolResults() on querypool %s specified stride %" PRIu64 " which must be at least counterIndexCount (%u) " "multiplied by sizeof(VkPerformanceCounterResultKHR) (%zu).", report_data->FormatHandle(queryPool).c_str(), stride, query_items, sizeof(VkPerformanceCounterResultKHR)); } break; // These cases intentionally fall through to the default case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR: // VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV case VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR: case VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL: default: query_size = 0; break; } if (query_size && (((queryCount - 1) * stride + query_size) > dataSize)) { skip |= LogError(queryPool, "VUID-vkGetQueryPoolResults-dataSize-00817", "vkGetQueryPoolResults() on querypool %s specified dataSize %zu which is " "incompatible with the specified query type and options.", report_data->FormatHandle(queryPool).c_str(), dataSize); } } } return skip; } bool CoreChecks::ValidateInsertMemoryRange(const VulkanTypedHandle &typed_handle, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize memoryOffset, const char *api_name) const { bool skip = false; if (memoryOffset >= mem_info->alloc_info.allocationSize) { const char *error_code = nullptr; if (typed_handle.type == kVulkanObjectTypeBuffer) { if (strcmp(api_name, "vkBindBufferMemory()") == 0) { error_code = "VUID-vkBindBufferMemory-memoryOffset-01031"; } else { error_code = "VUID-VkBindBufferMemoryInfo-memoryOffset-01031"; } } else if (typed_handle.type == kVulkanObjectTypeImage) { if (strcmp(api_name, "vkBindImageMemory()") == 0) { error_code = "VUID-vkBindImageMemory-memoryOffset-01046"; } else { error_code = "VUID-VkBindImageMemoryInfo-memoryOffset-01046"; } } else if (typed_handle.type == kVulkanObjectTypeAccelerationStructureNV) { error_code = "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03621"; } else { // Unsupported object type assert(false); } LogObjectList objlist(mem_info->mem()); objlist.add(typed_handle); skip =
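// For example, binding at memoryOffset == allocationSize is already invalid: the offset must point inside
// the allocation (memoryOffset < allocationSize), which is exactly the condition tested above.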
LogError(objlist, error_code, "In %s, attempting to bind %s to %s, memoryOffset=0x%" PRIxLEAST64 " must be less than the memory allocation size 0x%" PRIxLEAST64 ".", api_name, report_data->FormatHandle(mem_info->mem()).c_str(), report_data->FormatHandle(typed_handle).c_str(), memoryOffset, mem_info->alloc_info.allocationSize); } return skip; } bool CoreChecks::ValidateInsertImageMemoryRange(VkImage image, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(image, kVulkanObjectTypeImage), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertBufferMemoryRange(VkBuffer buffer, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(buffer, kVulkanObjectTypeBuffer), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateInsertAccelerationStructureMemoryRange(VkAccelerationStructureNV as, const DEVICE_MEMORY_STATE *mem_info, VkDeviceSize mem_offset, const char *api_name) const { return ValidateInsertMemoryRange(VulkanTypedHandle(as, kVulkanObjectTypeAccelerationStructureNV), mem_info, mem_offset, api_name); } bool CoreChecks::ValidateMemoryTypes(const DEVICE_MEMORY_STATE *mem_info, const uint32_t memory_type_bits, const char *funcName, const char *msgCode) const { bool skip = false; if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) { skip = LogError(mem_info->mem(), msgCode, "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory " "type (0x%X) of %s.", funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex, report_data->FormatHandle(mem_info->mem()).c_str()); } return skip; } bool CoreChecks::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset, const char *api_name) const { const BUFFER_STATE *buffer_state = GetBufferState(buffer); bool bind_buffer_mem_2 = strcmp(api_name, "vkBindBufferMemory()") != 0; bool skip = false; if (buffer_state) { // Track objects tied to memory skip = ValidateSetMemBinding(mem, buffer_state->Handle(), api_name); const auto mem_info = GetDevMemState(mem); // Validate memory requirements alignment if (SafeModulo(memoryOffset, buffer_state->requirements.alignment) != 0) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memoryOffset-01036" : "VUID-vkBindBufferMemory-memoryOffset-01036"; skip |= LogError(buffer, vuid, "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, memoryOffset, buffer_state->requirements.alignment); } if (mem_info) { // Validate bound memory range information skip |= ValidateInsertBufferMemoryRange(buffer, mem_info, memoryOffset, api_name); const char *mem_type_vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01035" : "VUID-vkBindBufferMemory-memory-01035"; skip |= ValidateMemoryTypes(mem_info, buffer_state->requirements.memoryTypeBits, api_name, mem_type_vuid); // Validate memory requirements size if (buffer_state->requirements.size > (mem_info->alloc_info.allocationSize - memoryOffset)) { const char *vuid = bind_buffer_mem_2 ? 
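// Worked example: with requirements.size == 256 and allocationSize == 1024, any memoryOffset greater than 768
// leaves fewer than 256 bytes available and triggers the size check below (the offset must also satisfy
// requirements.alignment, validated above).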
"VUID-VkBindBufferMemoryInfo-size-01037" : "VUID-vkBindBufferMemory-size-01037"; skip |= LogError(buffer, vuid, "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetBufferMemoryRequirements with buffer.", api_name, mem_info->alloc_info.allocationSize - memoryOffset, buffer_state->requirements.size); } // Validate dedicated allocation if (mem_info->IsDedicatedBuffer() && ((mem_info->dedicated->handle.Cast<VkBuffer>() != buffer) || (memoryOffset != 0))) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-memory-01508" : "VUID-vkBindBufferMemory-memory-01508"; LogObjectList objlist(buffer); objlist.add(mem); objlist.add(mem_info->dedicated->handle); skip |= LogError(objlist, vuid, "%s: for dedicated %s, VkMemoryDedicatedAllocateInfo::buffer %s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str(), report_data->FormatHandle(buffer).c_str(), memoryOffset); } auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (enabled_features.core12.bufferDeviceAddress && (buffer_state->createInfo.usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT) && (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT))) { skip |= LogError(buffer, "VUID-vkBindBufferMemory-bufferDeviceAddress-03339", "%s: If buffer was created with the VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT bit set, " "memory must have been allocated with the VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT bit set.", api_name); } // Validate export memory handles if ((mem_info->export_handle_type_flags != 0) && ((mem_info->export_handle_type_flags & buffer_state->external_memory_handle) == 0)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-memory-02726" : "VUID-vkBindBufferMemory-memory-02726"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least one " "handle from VkBuffer (%s) handleType %s.", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->IsImportAHB() == true) { skip |= ValidateBufferImportedHandleANDROID(api_name, buffer_state->external_memory_handle, mem, buffer); } else if (mem_info->IsImport() == true) { if ((mem_info->import_handle_type_flags & buffer_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02985"; } else if ((!bind_buffer_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02985"; } else if ((bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindBufferMemoryInfo-memory-02727"; } else if ((!bind_buffer_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindBufferMemory-memory-02727"; } LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s which " "is not set in the VkBuffer (%s) VkExternalMemoryBufferCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(mem).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(buffer).c_str(), string_VkExternalMemoryHandleTypeFlags(buffer_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((buffer_state->unprotected == false) && (mem_info->unprotected == true)) { const char *vuid = bind_buffer_mem_2 ? "VUID-VkBindBufferMemoryInfo-None-01898" : "VUID-vkBindBufferMemory-None-01898"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkBuffer (%s) was set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } else if ((buffer_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_buffer_mem_2 ? 
"VUID-VkBindBufferMemoryInfo-None-01899" : "VUID-vkBindBufferMemory-None-01899"; LogObjectList objlist(buffer); objlist.add(mem); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkBuffer (%s) was not set " "to use protected memory.", api_name, report_data->FormatHandle(mem).c_str(), report_data->FormatHandle(buffer).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) const { const char *api_name = "vkBindBufferMemory()"; return ValidateBindBufferMemory(buffer, mem, memoryOffset, api_name); } bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo *pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset, api_name); } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) const { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageMemoryRequirementsANDROID(image, "vkGetImageMemoryRequirements()"); } const IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Checks for no disjoint bit if (image_state->disjoint == true) { skip |= LogError(image, "VUID-vkGetImageMemoryRequirements-image-01588", "vkGetImageMemoryRequirements(): %s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT " "(need to use vkGetImageMemoryRequirements2).", report_data->FormatHandle(image).c_str()); } } return skip; } bool CoreChecks::ValidateGetImageMemoryRequirements2(const VkImageMemoryRequirementsInfo2 *pInfo, const char *func_name) const { bool skip = false; if (device_extensions.vk_android_external_memory_android_hardware_buffer) { skip |= ValidateGetImageMemoryRequirementsANDROID(pInfo->image, func_name); } const IMAGE_STATE *image_state = GetImageState(pInfo->image); const VkFormat image_format = image_state->createInfo.format; const VkImageTiling image_tiling = image_state->createInfo.tiling; const VkImagePlaneMemoryRequirementsInfo *image_plane_info = LvlFindInChain<VkImagePlaneMemoryRequirementsInfo>(pInfo->pNext); if ((FormatIsMultiplane(image_format)) && (image_state->disjoint == true) && (image_plane_info == nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01589", "%s: %s image was created with a multi-planar format (%s) and " "VK_IMAGE_CREATE_DISJOINT_BIT, but the current pNext doesn't include a " "VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format)); } if ((image_state->disjoint == false) && (image_plane_info != nullptr)) { skip |= LogError(pInfo->image, "VUID-VkImageMemoryRequirementsInfo2-image-01590", "%s: %s image was not created with 
VK_IMAGE_CREATE_DISJOINT_BIT, " "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str()); } if ((FormatIsMultiplane(image_format) == false) && (image_tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) && (image_plane_info != nullptr)) { const char *vuid = device_extensions.vk_ext_image_drm_format_modifier ? "VUID-VkImageMemoryRequirementsInfo2-image-02280" : "VUID-VkImageMemoryRequirementsInfo2-image-01591"; skip |= LogError(pInfo->image, vuid, "%s: %s image is a single-plane format (%s) and does not have tiling of " "VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, " "but the current pNext includes a VkImagePlaneMemoryRequirementsInfo struct", func_name, report_data->FormatHandle(pInfo->image).c_str(), string_VkFormat(image_format)); } if (image_plane_info != nullptr) { if ((image_tiling == VK_IMAGE_TILING_LINEAR) || (image_tiling == VK_IMAGE_TILING_OPTIMAL)) { // Make sure planeAspect is only a single, valid plane uint32_t planes = FormatPlaneCount(image_format); VkImageAspectFlags aspect = image_plane_info->planeAspect; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) { skip |= LogError( pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281", "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT.", func_name, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError( pInfo->image, "VUID-VkImagePlaneMemoryRequirementsInfo-planeAspect-02281", "%s: Image %s VkImagePlaneMemoryRequirementsInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.", func_name, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) const { return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2()"); } bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo, VkMemoryRequirements2 *pMemoryRequirements) const { return ValidateGetImageMemoryRequirements2(pInfo, "vkGetImageMemoryRequirements2KHR()"); } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties); return skip; } bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo, VkImageFormatProperties2 *pImageFormatProperties) const { // Can't wrap AHB-specific validation in a device extension check here, but no harm bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(pImageFormatInfo, pImageFormatProperties); return skip; } bool
CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) const { const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); bool skip = false; if (pipeline_state) { skip |= ValidateObjectNotInUse(pipeline_state, "vkDestroyPipeline", "VUID-vkDestroyPipeline-pipeline-00765"); } return skip; } bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) const { const SAMPLER_STATE *sampler_state = GetSamplerState(sampler); bool skip = false; if (sampler_state) { skip |= ValidateObjectNotInUse(sampler_state, "vkDestroySampler", "VUID-vkDestroySampler-sampler-01082"); } return skip; } bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) const { const DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(descriptorPool); bool skip = false; if (desc_pool_state) { skip |= ValidateObjectNotInUse(desc_pool_state, "vkDestroyDescriptorPool", "VUID-vkDestroyDescriptorPool-descriptorPool-00303"); } return skip; } // Verify cmdBuffer in given cb_node is not in global in-flight set, and return skip result // If this is a secondary command buffer, then make sure its primary is also in-flight // If primary is not in-flight, then remove secondary from global in-flight set // This function is only valid at a point when cmdBuffer is being reset or freed bool CoreChecks::CheckCommandBufferInFlight(const CMD_BUFFER_STATE *cb_node, const char *action, const char *error_code) const { bool skip = false; if (cb_node->InUse()) { skip |= LogError(cb_node->commandBuffer(), error_code, "Attempt to %s %s which is in use.", action, report_data->FormatHandle(cb_node->commandBuffer()).c_str()); } return skip; } // Iterate over all cmdBuffers in given commandPool and verify that each is not in use bool CoreChecks::CheckCommandBuffersInFlight(const COMMAND_POOL_STATE *pPool, const char *action, const char *error_code) const { bool skip = false; for (auto cmd_buffer : pPool->commandBuffers) { skip |= CheckCommandBufferInFlight(GetCBState(cmd_buffer), action, error_code); } return skip; } bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) const { bool skip = false; for (uint32_t i = 0; i < commandBufferCount; i++) { const auto *cb_node = GetCBState(pCommandBuffers[i]); // Verify the command buffer is not in use before it may be freed if (cb_node) { skip |= CheckCommandBufferInFlight(cb_node, "free", "VUID-vkFreeCommandBuffers-pCommandBuffers-00047"); } } return skip; } bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) const { bool skip = false; skip |= ValidateDeviceQueueFamily(pCreateInfo->queueFamilyIndex, "vkCreateCommandPool", "pCreateInfo->queueFamilyIndex", "VUID-vkCreateCommandPool-queueFamilyIndex-01937"); if ((enabled_features.core11.protectedMemory == VK_FALSE) && ((pCreateInfo->flags & VK_COMMAND_POOL_CREATE_PROTECTED_BIT) != 0)) { skip |= LogError(device, "VUID-VkCommandPoolCreateInfo-flags-02860", "vkCreateCommandPool(): the protectedMemory device feature is disabled: CommandPools cannot be created " "with the VK_COMMAND_POOL_CREATE_PROTECTED_BIT set."); } return skip; } bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device,
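// Illustrative sketch (application side; family/count/indices values are placeholders): a performance query
// pool must chain VkQueryPoolPerformanceCreateInfoKHR off VkQueryPoolCreateInfo::pNext, e.g.
//   VkQueryPoolPerformanceCreateInfoKHR perf_ci{VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR};
//   perf_ci.queueFamilyIndex = family; perf_ci.counterIndexCount = n; perf_ci.pCounterIndices = indices;
//   VkQueryPoolCreateInfo ci{VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO, &perf_ci};
//   ci.queryType = VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR; ci.queryCount = 1;
// The checks below enforce the feature bit, the presence of this struct, and that each counter index is in
// range for the selected queue family.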
const VkQueryPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) const { if (disabled[query_validation]) return false; bool skip = false; if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) { if (!enabled_features.core.pipelineStatisticsQuery) { skip |= LogError(device, "VUID-VkQueryPoolCreateInfo-queryType-00791", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device with " "VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE."); } } if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { if (!enabled_features.performance_query_features.performanceCounterQueryPools) { skip |= LogError(device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-performanceCounterQueryPools-03237", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created on a device with " "VkPhysicalDevicePerformanceQueryFeaturesKHR.performanceCounterQueryPools == VK_FALSE."); } auto perf_ci = LvlFindInChain<VkQueryPoolPerformanceCreateInfoKHR>(pCreateInfo->pNext); if (!perf_ci) { skip |= LogError( device, "VUID-VkQueryPoolCreateInfo-queryType-03222", "vkCreateQueryPool(): Query pool with type VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR created but the pNext chain of " "pCreateInfo does not contain an instance of VkQueryPoolPerformanceCreateInfoKHR."); } else { const auto &perf_counter_iter = physical_device_state->perf_counters.find(perf_ci->queueFamilyIndex); if (perf_counter_iter == physical_device_state->perf_counters.end()) { skip |= LogError( device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-queueFamilyIndex-03236", "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::queueFamilyIndex is not a valid queue family index."); } else { const QUEUE_FAMILY_PERF_COUNTERS *perf_counters = perf_counter_iter->second.get(); for (uint32_t idx = 0; idx < perf_ci->counterIndexCount; idx++) { if (perf_ci->pCounterIndices[idx] >= perf_counters->counters.size()) { skip |= LogError( device, "VUID-VkQueryPoolPerformanceCreateInfoKHR-pCounterIndices-03321", "vkCreateQueryPool(): VkQueryPoolPerformanceCreateInfoKHR::pCounterIndices[%u] = %u is not a valid " "counter index.", idx, perf_ci->pCounterIndices[idx]); } } } } } return skip; } bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) const { const COMMAND_POOL_STATE *cp_state = GetCommandPoolState(commandPool); bool skip = false; if (cp_state) { // Verify that command buffers in pool are complete (not in-flight) skip |= CheckCommandBuffersInFlight(cp_state, "destroy command pool with", "VUID-vkDestroyCommandPool-commandPool-00041"); } return skip; } bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) const { const auto *command_pool_state = GetCommandPoolState(commandPool); return CheckCommandBuffersInFlight(command_pool_state, "reset command pool with", "VUID-vkResetCommandPool-commandPool-00040"); } bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) const { bool skip = false; for (uint32_t i = 0; i < fenceCount; ++i) { const auto fence_state = GetFenceState(pFences[i]); if (fence_state && fence_state->scope == kSyncScopeInternal && fence_state->state == FENCE_INFLIGHT) { skip |= LogError(pFences[i], "VUID-vkResetFences-pFences-01123", "%s is in use.", report_data->FormatHandle(pFences[i]).c_str()); } }
return skip; } bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) const { const FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(framebuffer); bool skip = false; if (framebuffer_state) { skip |= ValidateObjectNotInUse(framebuffer_state, "vkDestroyFramebuffer", "VUID-vkDestroyFramebuffer-framebuffer-00892"); } return skip; } bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) const { const RENDER_PASS_STATE *rp_state = GetRenderPassState(renderPass); bool skip = false; if (rp_state) { skip |= ValidateObjectNotInUse(rp_state, "vkDestroyRenderPass", "VUID-vkDestroyRenderPass-renderPass-00873"); } return skip; } // Access helper functions for external modules VkFormatProperties CoreChecks::GetPDFormatProperties(const VkFormat format) const { VkFormatProperties format_properties; DispatchGetPhysicalDeviceFormatProperties(physical_device, format, &format_properties); return format_properties; } bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::shared_ptr<PIPELINE_STATE>> const &pipe_state_vec, const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const { bool skip = false; const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits; for (uint32_t i = 0; i < count; i++) { auto pvids_ci = (pipe_cis[i].pVertexInputState) ? LvlFindInChain<VkPipelineVertexInputDivisorStateCreateInfoEXT>(pipe_cis[i].pVertexInputState->pNext) : nullptr; if (nullptr == pvids_ci) continue; const PIPELINE_STATE *pipe_state = pipe_state_vec[i].get(); for (uint32_t j = 0; j < pvids_ci->vertexBindingDivisorCount; j++) { const VkVertexInputBindingDivisorDescriptionEXT *vibdd = &(pvids_ci->pVertexBindingDivisors[j]); if (vibdd->binding >= device_limits->maxVertexInputBindings) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] binding index of (%1u) exceeds device maxVertexInputBindings (%1u).", i, j, vibdd->binding, device_limits->maxVertexInputBindings); } if (vibdd->divisor > phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor of (%1u) exceeds extension maxVertexAttribDivisor (%1u).", i, j, vibdd->divisor, phys_dev_ext_props.vtx_attrib_divisor_props.maxVertexAttribDivisor); } if ((0 == vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateZeroDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] divisor must not be 0 when vertexAttributeInstanceRateZeroDivisor feature is not " "enabled.", i, j); } if ((1 != vibdd->divisor) && !enabled_features.vtx_attrib_divisor_features.vertexAttributeInstanceRateDivisor) { skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " 
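// Spec background (summary, not validation logic): with inputRate == VK_VERTEX_INPUT_RATE_INSTANCE, a divisor
// of N means instances 0..N-1 share the first attribute value, N..2N-1 the second, and so on. A divisor of 0
// repeats a single value for all instances and requires vertexAttributeInstanceRateZeroDivisor, while any
// value other than 1 requires vertexAttributeInstanceRateDivisor, as the checks here enforce.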
"pVertexBindingDivisors[%1u] divisor (%1u) must be 1 when vertexAttributeInstanceRateDivisor feature is not " "enabled.", i, j, vibdd->divisor); } // Find the corresponding binding description and validate input rate setting bool failed_01871 = true; for (size_t k = 0; k < pipe_state->vertex_binding_descriptions_.size(); k++) { if ((vibdd->binding == pipe_state->vertex_binding_descriptions_[k].binding) && (VK_VERTEX_INPUT_RATE_INSTANCE == pipe_state->vertex_binding_descriptions_[k].inputRate)) { failed_01871 = false; break; } } if (failed_01871) { // Description not found, or has incorrect inputRate value skip |= LogError( device, "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871", "vkCreateGraphicsPipelines(): Pipeline[%1u] with chained VkPipelineVertexInputDivisorStateCreateInfoEXT, " "pVertexBindingDivisors[%1u] specifies binding index (%1u), but that binding index's " "VkVertexInputBindingDescription.inputRate member is not VK_VERTEX_INPUT_RATE_INSTANCE.", i, j, vibdd->binding); } } } return skip; } bool CoreChecks::ValidatePipelineCacheControlFlags(VkPipelineCreateFlags flags, uint32_t index, const char *caller_name, const char *vuid) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { const VkPipelineCreateFlags invalid_flags = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT | VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT; if ((flags & invalid_flags) != 0) { skip |= LogError(device, vuid, "%s(): pipelineCreationCacheControl is turned off but pipeline[%u] has VkPipelineCreateFlags " "containing VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT or " "VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT", caller_name, index); } } return skip; } bool CoreChecks::PreCallValidateCreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) const { bool skip = false; if (enabled_features.pipeline_creation_cache_control_features.pipelineCreationCacheControl == VK_FALSE) { if ((pCreateInfo->flags & VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT) != 0) { skip |= LogError(device, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892", "vkCreatePipelineCache(): pipelineCreationCacheControl is turned off but pCreateInfo::flags contains " "VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT"); } } return skip; } bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *cgpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, cgpl_state_data); create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data); for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineLocked(cgpl_state->pipe_state, i); } for (uint32_t i = 0; i < count; i++) { skip |= ValidatePipelineUnlocked(cgpl_state->pipe_state[i].get(), i); } if (device_extensions.vk_ext_vertex_attribute_divisor) { skip |= ValidatePipelineVertexDivisors(cgpl_state->pipe_state, count, pCreateInfos); } if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) { for (uint32_t i = 0; i < count; ++i) { // Validate depth-stencil state auto raster_state_ci = 
pCreateInfos[i].pRasterizationState; if ((VK_FALSE == enabled_features.portability_subset_features.separateStencilMaskRef) && raster_state_ci && (VK_CULL_MODE_NONE == raster_state_ci->cullMode)) { auto depth_stencil_ci = pCreateInfos[i].pDepthStencilState; if (depth_stencil_ci && (VK_TRUE == depth_stencil_ci->stencilTestEnable) && (depth_stencil_ci->front.reference != depth_stencil_ci->back.reference)) { skip |= LogError(device, "VUID-VkPipelineDepthStencilStateCreateInfo-separateStencilMaskRef-04453", "Invalid Pipeline CreateInfo[%d] (portability error): VkStencilOpState::reference must be the " "same for front and back", i); } } // Validate color attachments uint32_t subpass = pCreateInfos[i].subpass; const auto *render_pass = GetRenderPassState(pCreateInfos[i].renderPass); bool ignore_color_blend_state = pCreateInfos[i].pRasterizationState->rasterizerDiscardEnable || render_pass->createInfo.pSubpasses[subpass].colorAttachmentCount == 0; if ((VK_FALSE == enabled_features.portability_subset_features.constantAlphaColorBlendFactors) && !ignore_color_blend_state) { auto color_blend_state = pCreateInfos[i].pColorBlendState; const auto attachments = color_blend_state->pAttachments; for (uint32_t color_attachment_index = 0; color_attachment_index < color_blend_state->attachmentCount; ++color_attachment_index) { if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor) || (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].srcColorBlendFactor)) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04454", "Invalid Pipeline CreateInfo[%d] (portability error): srcColorBlendFactor for color attachment %d must " "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA", i, color_attachment_index); } if ((VK_BLEND_FACTOR_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor) || (VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA == attachments[color_attachment_index].dstColorBlendFactor)) { skip |= LogError( device, "VUID-VkPipelineColorBlendAttachmentState-constantAlphaColorBlendFactors-04455", "Invalid Pipeline CreateInfo[%d] (portability error): dstColorBlendFactor for color attachment %d must " "not be VK_BLEND_FACTOR_CONSTANT_ALPHA or VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA", i, color_attachment_index); } } } } } return skip; } void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result, void *cgpl_state_data) { ValidationStateTracker::PostCallRecordCreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, result, cgpl_state_data); if (enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate) { for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline_state = GetPipelineState(pPipelines[i]); RecordGraphicsPipelineShaderDynamicState(pipeline_state); } } } bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *ccpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, ccpl_state_data); auto *ccpl_state = reinterpret_cast<create_compute_pipeline_api_state
*>(ccpl_state_data); for (uint32_t i = 0; i < count; i++) { // TODO: Add Compute Pipeline Verification skip |= ValidateComputePipelineShaderState(ccpl_state->pipe_state[i].get()); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateComputePipelines", "VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875"); } return skip; } bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoNV *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesNV(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, crtpl_state_data); auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_api_state *>(crtpl_state_data); for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get(); if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { const PIPELINE_STATE *base_pipeline = nullptr; if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) { base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get(); } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle); } if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError( device, "VUID-vkCreateRayTracingPipelinesNV-flags-03416", "vkCreateRayTracingPipelinesNV: If the flags member of any element of pCreateInfos contains the " "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag, " "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set."); } } skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ false); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesNV", "VUID-VkRayTracingPipelineCreateInfoNV-pipelineCreationCacheControl-02905"); } return skip; } bool CoreChecks::PreCallValidateCreateRayTracingPipelinesKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, VkPipelineCache pipelineCache, uint32_t count, const VkRayTracingPipelineCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, void *crtpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateRayTracingPipelinesKHR(device, deferredOperation, pipelineCache, count, pCreateInfos, pAllocator, pPipelines, crtpl_state_data); auto *crtpl_state = reinterpret_cast<create_ray_tracing_pipeline_khr_api_state *>(crtpl_state_data); for (uint32_t i = 0; i < count; i++) { PIPELINE_STATE *pipeline = crtpl_state->pipe_state[i].get(); if (pipeline->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) { const PIPELINE_STATE *base_pipeline = nullptr; if (pipeline->raytracingPipelineCI.basePipelineIndex != -1) { base_pipeline = crtpl_state->pipe_state[pipeline->raytracingPipelineCI.basePipelineIndex].get(); } else if (pipeline->raytracingPipelineCI.basePipelineHandle != VK_NULL_HANDLE) { base_pipeline = GetPipelineState(pipeline->raytracingPipelineCI.basePipelineHandle); } if (!base_pipeline || !(base_pipeline->getPipelineCreateFlags() & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) { skip |= LogError( device, "VUID-vkCreateRayTracingPipelinesKHR-flags-03416", "vkCreateRayTracingPipelinesKHR: If the flags member "
of any element of pCreateInfos contains the " "VK_PIPELINE_CREATE_DERIVATIVE_BIT flag," "the base pipeline must have been created with the VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT flag set."); } } skip |= ValidateRayTracingPipeline(pipeline, pCreateInfos[i].flags, /*isKHR*/ true); skip |= ValidatePipelineCacheControlFlags(pCreateInfos[i].flags, i, "vkCreateRayTracingPipelinesKHR", "VUID-VkRayTracingPipelineCreateInfoKHR-pipelineCreationCacheControl-02905"); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutablePropertiesKHR(VkDevice device, const VkPipelineInfoKHR *pPipelineInfo, uint32_t *pExecutableCount, VkPipelineExecutablePropertiesKHR *pProperties) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, nullptr, "vkGetPipelineExecutablePropertiesKHR", "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270"); return skip; } bool CoreChecks::ValidatePipelineExecutableInfo(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, const char *caller_name, const char *feature_vuid) const { bool skip = false; if (!enabled_features.pipeline_exe_props_features.pipelineExecutableInfo) { skip |= LogError(device, feature_vuid, "%s(): called when pipelineExecutableInfo feature is not enabled.", caller_name); } // vkGetPipelineExecutablePropertiesKHR will not have struct to validate further if (pExecutableInfo) { auto pi = LvlInitStruct<VkPipelineInfoKHR>(); pi.pipeline = pExecutableInfo->pipeline; // We could probably cache this instead of fetching it every time uint32_t executable_count = 0; DispatchGetPipelineExecutablePropertiesKHR(device, &pi, &executable_count, NULL); if (pExecutableInfo->executableIndex >= executable_count) { skip |= LogError( pExecutableInfo->pipeline, "VUID-VkPipelineExecutableInfoKHR-executableIndex-03275", "%s(): VkPipelineExecutableInfo::executableIndex (%1u) must be less than the number of executables associated with " "the pipeline (%1u) as returned by vkGetPipelineExecutablePropertiessKHR", caller_name, pExecutableInfo->executableIndex, executable_count); } } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableStatisticsKHR(VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pStatisticCount, VkPipelineExecutableStatisticKHR *pStatistics) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableStatisticsKHR", "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272"); const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline); if (!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR)) { skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274", "vkGetPipelineExecutableStatisticsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateGetPipelineExecutableInternalRepresentationsKHR( VkDevice device, const VkPipelineExecutableInfoKHR *pExecutableInfo, uint32_t *pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR *pStatistics) const { bool skip = false; skip |= ValidatePipelineExecutableInfo(device, pExecutableInfo, "vkGetPipelineExecutableInternalRepresentationsKHR", "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipelineExecutableInfo-03276"); const PIPELINE_STATE *pipeline_state = GetPipelineState(pExecutableInfo->pipeline); if 
(!(pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR)) { skip |= LogError(pExecutableInfo->pipeline, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278", "vkGetPipelineExecutableInternalRepresentationsKHR called on a pipeline created without the " "VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR flag set"); } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) const { return cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( this, pCreateInfo, IsExtEnabled(device_extensions.vk_khr_push_descriptor), phys_dev_ext_props.max_push_descriptors, IsExtEnabled(device_extensions.vk_ext_descriptor_indexing), &enabled_features.core12, &enabled_features.inline_uniform_block, &phys_dev_ext_props.inline_uniform_block_props, &device_extensions); } enum DSL_DESCRIPTOR_GROUPS { DSL_TYPE_SAMPLERS = 0, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK, DSL_NUM_DESCRIPTOR_GROUPS }; // Used by PreCallValidateCreatePipelineLayout. // Returns an array of size DSL_NUM_DESCRIPTOR_GROUPS of the maximum number of descriptors used in any single pipeline stage std::valarray<uint32_t> GetDescriptorCountMaxPerStage( const DeviceFeatures *enabled_features, const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { // Identify active pipeline stages std::vector<VkShaderStageFlags> stage_flags = {VK_SHADER_STAGE_VERTEX_BIT, VK_SHADER_STAGE_FRAGMENT_BIT, VK_SHADER_STAGE_COMPUTE_BIT}; if (enabled_features->core.geometryShader) { stage_flags.push_back(VK_SHADER_STAGE_GEOMETRY_BIT); } if (enabled_features->core.tessellationShader) { stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); stage_flags.push_back(VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); } // Allow iteration over enum values std::vector<DSL_DESCRIPTOR_GROUPS> dsl_groups = { DSL_TYPE_SAMPLERS, DSL_TYPE_UNIFORM_BUFFERS, DSL_TYPE_STORAGE_BUFFERS, DSL_TYPE_SAMPLED_IMAGES, DSL_TYPE_STORAGE_IMAGES, DSL_TYPE_INPUT_ATTACHMENTS, DSL_TYPE_INLINE_UNIFORM_BLOCK}; // Sum by layouts per stage, then pick max of stages per type std::valarray<uint32_t> max_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // max descriptor sum among all pipeline stages for (auto stage : stage_flags) { std::valarray<uint32_t> stage_sum(0U, DSL_NUM_DESCRIPTOR_GROUPS); // per-stage sums for (const auto &dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (0 != (stage & binding->stageFlags) && binding->descriptorCount > 0) { switch (binding->descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: stage_sum[DSL_TYPE_UNIFORM_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: 
stage_sum[DSL_TYPE_STORAGE_BUFFERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: stage_sum[DSL_TYPE_STORAGE_IMAGES] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: stage_sum[DSL_TYPE_SAMPLED_IMAGES] += binding->descriptorCount; stage_sum[DSL_TYPE_SAMPLERS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: stage_sum[DSL_TYPE_INPUT_ATTACHMENTS] += binding->descriptorCount; break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: // count one block per binding. descriptorCount is number of bytes stage_sum[DSL_TYPE_INLINE_UNIFORM_BLOCK]++; break; default: break; } } } } for (auto type : dsl_groups) { max_sum[type] = std::max(stage_sum[type], max_sum[type]); } } return max_sum; } // Used by PreCallValidateCreatePipelineLayout. // Returns a map indexed by VK_DESCRIPTOR_TYPE_* enum of the summed descriptors by type. // Note: descriptors only count against the limit once even if used by multiple stages. std::map<uint32_t, uint32_t> GetDescriptorSum( const std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> &set_layouts, bool skip_update_after_bind) { std::map<uint32_t, uint32_t> sum_by_type; for (const auto &dsl : set_layouts) { if (skip_update_after_bind && (dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) { continue; } for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) { const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx); // Bindings with a descriptorCount of 0 are "reserved" and should be skipped if (binding->descriptorCount > 0) { if (binding->descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { // count one block per binding. descriptorCount is number of bytes sum_by_type[binding->descriptorType]++; } else { sum_by_type[binding->descriptorType] += binding->descriptorCount; } } } } return sum_by_type; } bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) const { bool skip = false; std::vector<std::shared_ptr<cvdescriptorset::DescriptorSetLayout const>> set_layouts(pCreateInfo->setLayoutCount, nullptr); unsigned int push_descriptor_set_count = 0; { for (uint32_t i = 0; i < pCreateInfo->setLayoutCount; ++i) { set_layouts[i] = GetDescriptorSetLayoutShared(pCreateInfo->pSetLayouts[i]); if (set_layouts[i]->IsPushDescriptor()) ++push_descriptor_set_count; } } if (push_descriptor_set_count > 1) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293", "vkCreatePipelineLayout() Multiple push descriptor sets found."); } // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, true); // Samplers if (max_descriptors_per_stage[DSL_TYPE_SAMPLERS] > phys_dev_props.limits.maxPerStageDescriptorSamplers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03016" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorSamplers limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLERS], phys_dev_props.limits.maxPerStageDescriptorSamplers); } // Uniform buffers if (max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorUniformBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUniformBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorUniformBuffers); } // Storage buffers if (max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props.limits.maxPerStageDescriptorStorageBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorStorageBuffers limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props.limits.maxPerStageDescriptorStorageBuffers); } // Sampled images if (max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorSampledImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorSampledImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props.limits.maxPerStageDescriptorSampledImages); } // Storage images if (max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props.limits.maxPerStageDescriptorStorageImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorStorageImages limit (%d).", max_descriptors_per_stage[DSL_TYPE_STORAGE_IMAGES], phys_dev_props.limits.maxPerStageDescriptorStorageImages); } // Input attachments if (max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props.limits.maxPerStageDescriptorInputAttachments) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03021" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorInputAttachments limit (%d).", max_descriptors_per_stage[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props.limits.maxPerStageDescriptorInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-02214" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02212"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorInlineUniformBlocks limit (%d).", max_descriptors_per_stage[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorInlineUniformBlocks); } // Total descriptors by type // std::map<uint32_t, uint32_t> sum_all_stages = GetDescriptorSum(set_layouts, true); // Samplers uint32_t sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props.limits.maxDescriptorSetSamplers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetSamplers limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSamplers); } // Uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props.limits.maxDescriptorSetUniformBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props.limits.maxDescriptorSetUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUniformBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetUniformBuffersDynamic); } // Storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props.limits.maxDescriptorSetStorageBuffers) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03031" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffers limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], phys_dev_props.limits.maxDescriptorSetStorageBuffers); } // Dynamic storage buffers if (sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] > phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageBuffersDynamic limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC], phys_dev_props.limits.maxDescriptorSetStorageBuffersDynamic); } // Sampled images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] + sum_all_stages[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetSampledImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device " "maxDescriptorSetSampledImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetSampledImages); } // Storage images sum = sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] + sum_all_stages[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER]; if (sum > phys_dev_props.limits.maxDescriptorSetStorageImages) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device " "maxDescriptorSetStorageImages limit (%d).", sum, phys_dev_props.limits.maxDescriptorSetStorageImages); } // Input attachments if (sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] > phys_dev_props.limits.maxDescriptorSetInputAttachments) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device " "maxDescriptorSetInputAttachments limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT], phys_dev_props.limits.maxDescriptorSetInputAttachments); } // Inline uniform blocks if (sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] > phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks) { const char *vuid = (device_extensions.vk_ext_descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-02216" : "VUID-VkPipelineLayoutCreateInfo-descriptorType-02213"; skip |= LogError(device, vuid, "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device " "maxDescriptorSetInlineUniformBlocks limit (%d).", sum_all_stages[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT], phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks); } if (device_extensions.vk_ext_descriptor_indexing) { // XXX TODO: replace with correct VU messages // Max descriptors by type, within a single pipeline stage std::valarray<uint32_t> max_descriptors_per_stage_update_after_bind = GetDescriptorCountMaxPerStage(&enabled_features, set_layouts, false); // Samplers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022", "vkCreatePipelineLayout(): max per-stage sampler bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSamplers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSamplers); } // Uniform buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023", "vkCreatePipelineLayout(): max per-stage uniform buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindUniformBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_UNIFORM_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindUniformBuffers); } // Storage buffers if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024", "vkCreatePipelineLayout(): max per-stage storage buffer bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageBuffers limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_BUFFERS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageBuffers); } // Sampled images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025", "vkCreatePipelineLayout(): max per-stage sampled image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindSampledImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_SAMPLED_IMAGES], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindSampledImages); } // Storage images if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026", "vkCreatePipelineLayout(): max per-stage storage image bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindStorageImages limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_STORAGE_IMAGES], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindStorageImages); } // Input attachments if 
(max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS] > phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027", "vkCreatePipelineLayout(): max per-stage input attachment bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInputAttachments limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INPUT_ATTACHMENTS], phys_dev_props_core12.maxPerStageDescriptorUpdateAfterBindInputAttachments); } // Inline uniform blocks if (max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK] > phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02215", "vkCreatePipelineLayout(): max per-stage inline uniform block bindings count (%d) exceeds device " "maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks limit (%d).", max_descriptors_per_stage_update_after_bind[DSL_TYPE_INLINE_UNIFORM_BLOCK], phys_dev_ext_props.inline_uniform_block_props.maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks); } // Total descriptors by type, summed across all pipeline stages // std::map<uint32_t, uint32_t> sum_all_stages_update_after_bind = GetDescriptorSum(set_layouts, false); // Samplers sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLER] + sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER]; if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036", "vkCreatePipelineLayout(): sum of sampler bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindSamplers limit (%d).", sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSamplers); } // Uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037", "vkCreatePipelineLayout(): sum of uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffers); } // Dynamic uniform buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038", "vkCreatePipelineLayout(): sum of dynamic uniform buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindUniformBuffersDynamic limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC], phys_dev_props_core12.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic); } // Storage buffers if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER] > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers) { skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039", "vkCreatePipelineLayout(): sum of storage buffer bindings among all stages (%d) exceeds device " "maxDescriptorSetUpdateAfterBindStorageBuffers limit (%d).", sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER], 
phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffers);
        }
        // Dynamic storage buffers
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
            skip |=
                LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040",
                         "vkCreatePipelineLayout(): sum of dynamic storage buffer bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUpdateAfterBindStorageBuffersDynamic limit (%d).",
                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC],
                         phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic);
        }
        // Sampled images
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER];
        if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041",
                             "vkCreatePipelineLayout(): sum of sampled image bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindSampledImages limit (%d).",
                             sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindSampledImages);
        }
        // Storage images
        sum = sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_IMAGE] +
              sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER];
        if (sum > phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042",
                             "vkCreatePipelineLayout(): sum of storage image bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindStorageImages limit (%d).",
                             sum, phys_dev_props_core12.maxDescriptorSetUpdateAfterBindStorageImages);
        }
        // Input attachments
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT] >
            phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043",
                             "vkCreatePipelineLayout(): sum of input attachment bindings among all stages (%d) exceeds device "
                             "maxDescriptorSetUpdateAfterBindInputAttachments limit (%d).",
                             sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT],
                             phys_dev_props_core12.maxDescriptorSetUpdateAfterBindInputAttachments);
        }
        // Inline uniform blocks
        if (sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT] >
            phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks) {
            skip |=
                LogError(device, "VUID-VkPipelineLayoutCreateInfo-descriptorType-02217",
                         "vkCreatePipelineLayout(): sum of inline uniform block bindings among all stages (%d) exceeds device "
                         "maxDescriptorSetUpdateAfterBindInlineUniformBlocks limit (%d).",
                         sum_all_stages_update_after_bind[VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT],
                         phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetUpdateAfterBindInlineUniformBlocks);
        }
    }

    if (device_extensions.vk_ext_fragment_density_map_2) {
        uint32_t sum_subsampled_samplers = 0;
        for (const auto &dsl : set_layouts) {
            // find the number of subsampled samplers across all stages
            // NOTE: this does not use the GetDescriptorSum pattern because it needs the GetSamplerState method
            if ((dsl->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT)) {
                continue;
            }
            for (uint32_t binding_idx = 0; binding_idx < dsl->GetBindingCount(); binding_idx++) {
                const VkDescriptorSetLayoutBinding *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(binding_idx);
                // Bindings with a descriptorCount of 0 are "reserved" and should be skipped
                if (binding->descriptorCount > 0) {
                    if (((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
                         (binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER)) &&
                        (binding->pImmutableSamplers != nullptr)) {
                        for (uint32_t sampler_idx = 0; sampler_idx < binding->descriptorCount; sampler_idx++) {
                            const SAMPLER_STATE *state = GetSamplerState(binding->pImmutableSamplers[sampler_idx]);
                            if (state->createInfo.flags & (VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT |
                                                           VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT)) {
                                sum_subsampled_samplers++;
                            }
                        }
                    }
                }
            }
        }
        if (sum_subsampled_samplers > phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers) {
            skip |= LogError(device, "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566",
                             "vkCreatePipelineLayout(): sum of sampler bindings with flags containing "
                             "VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT or "
                             "VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT among all stages (%d) "
                             "exceeds device maxDescriptorSetSubsampledSamplers limit (%d).",
                             sum_subsampled_samplers,
                             phys_dev_ext_props.fragment_density_map2_props.maxDescriptorSetSubsampledSamplers);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                    VkDescriptorPoolResetFlags flags) const {
    // Make sure sets being destroyed are not currently in-use
    if (disabled[object_in_use]) return false;
    bool skip = false;
    const DESCRIPTOR_POOL_STATE *pool = GetDescriptorPoolState(descriptorPool);
    if (pool != nullptr) {
        for (auto *ds : pool->sets) {
            if (ds && ds->InUse()) {
                skip |= LogError(descriptorPool, "VUID-vkResetDescriptorPool-descriptorPool-00313",
                                 "It is invalid to call vkResetDescriptorPool() with descriptor sets in use by a command buffer.");
                if (skip) break;
            }
        }
    }
    return skip;
}

// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
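//
// Illustration only (hypothetical numbers, not drawn from the checks themselves): a request for
// two sets whose layouts together require four UNIFORM_BUFFER descriptors and one SAMPLER
// descriptor should be flagged unless the pool still has at least two free sets and at least
// that many free descriptors of each type remaining.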
bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                       VkDescriptorSet *pDescriptorSets, void *ads_state_data) const {
    StateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data);

    cvdescriptorset::AllocateDescriptorSetsData *ads_state =
        reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
    // All state checks for AllocateDescriptorSets are done in a single function
    return ValidateAllocateDescriptorSets(pAllocateInfo, ads_state);
}

bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                                   const VkDescriptorSet *pDescriptorSets) const {
    bool skip = false;
    // First make sure the sets being destroyed are not currently in-flight
    for (uint32_t i = 0; i < count; ++i) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            skip |= ValidateIdleDescriptorSet(pDescriptorSets[i], "vkFreeDescriptorSets");
        }
    }
    const DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(descriptorPool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip |= LogError(descriptorPool, "VUID-vkFreeDescriptorSets-descriptorPool-00312",
                         "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                         "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                     const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                     const VkCopyDescriptorSet *pDescriptorCopies) const {
    // First thing to do is perform map look-ups.
    // NOTE: UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets,
    //  so we can't just do a single map look-up up-front; they are done individually in the functions below.

    // Now make call(s) that validate state, but don't perform state updates in this function.
    // Note that DescriptorSets are unique here in that we don't yet have an instance, so a helper function in the
    //  namespace parses the params and makes calls into specific class instances.
    return ValidateUpdateDescriptorSets(descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
                                        "vkUpdateDescriptorSets()");
}

bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer,
                                                   const VkCommandBufferBeginInfo *pBeginInfo) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    if (!cb_state) return false;
    bool skip = false;
    if (cb_state->InUse()) {
        skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049",
                         "Calling vkBeginCommandBuffer() on active %s before it has completed. You must check "
                         "command buffer fence before this call.",
                         report_data->FormatHandle(commandBuffer).c_str());
    }
    if (cb_state->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        // Primary Command Buffer
        const VkCommandBufferUsageFlags invalid_usage =
            (VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT);
        if ((pBeginInfo->flags & invalid_usage) == invalid_usage) {
            skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-02840",
                             "vkBeginCommandBuffer(): Primary %s can't have both VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT and "
                             "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.",
                             report_data->FormatHandle(commandBuffer).c_str());
        }
    } else {
        // Secondary Command Buffer
        const VkCommandBufferInheritanceInfo *info = pBeginInfo->pInheritanceInfo;
        if (!info) {
            skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00051",
                             "vkBeginCommandBuffer(): Secondary %s must have inheritance info.",
                             report_data->FormatHandle(commandBuffer).c_str());
        } else {
            if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                assert(info->renderPass);
                const auto *framebuffer = GetFramebufferState(info->framebuffer);
                if (framebuffer) {
                    if (framebuffer->createInfo.renderPass != info->renderPass) {
                        const auto *render_pass = GetRenderPassState(info->renderPass);
                        // renderPass that framebuffer was created with must be compatible with local renderPass
                        skip |= ValidateRenderPassCompatibility("framebuffer", framebuffer->rp_state.get(), "command buffer",
                                                                render_pass, "vkBeginCommandBuffer()",
                                                                "VUID-VkCommandBufferBeginInfo-flags-00055");
                    }
                }
            }
            if ((info->occlusionQueryEnable == VK_FALSE || enabled_features.core.occlusionQueryPrecise == VK_FALSE) &&
                (info->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00052",
                                 "vkBeginCommandBuffer(): Secondary %s must not have VK_QUERY_CONTROL_PRECISE_BIT if "
                                 "occlusionQuery is disabled or the device does not support precise occlusion queries.",
                                 report_data->FormatHandle(commandBuffer).c_str());
            }
            auto p_inherited_viewport_scissor_info =
                LvlFindInChain<VkCommandBufferInheritanceViewportScissorInfoNV>(info->pNext);
            if (p_inherited_viewport_scissor_info != nullptr && p_inherited_viewport_scissor_info->viewportScissor2D) {
                if (!enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) {
                    skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04782",
                                     "vkBeginCommandBuffer(): inheritedViewportScissor2D feature not enabled.");
                }
                if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                    skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04786",
                                     "vkBeginCommandBuffer(): Secondary %s must be recorded with the "
                                     "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT if viewportScissor2D is VK_TRUE.",
                                     report_data->FormatHandle(commandBuffer).c_str());
                }
                if (p_inherited_viewport_scissor_info->viewportDepthCount == 0) {
                    skip |= LogError(commandBuffer, "VUID-VkCommandBufferInheritanceViewportScissorInfoNV-viewportScissor2D-04784",
                                     "vkBeginCommandBuffer(): "
                                     "If viewportScissor2D is VK_TRUE, then viewportDepthCount must be greater than 0.");
                }
            }
        }
        if (info && info->renderPass != VK_NULL_HANDLE) {
            const auto *render_pass = GetRenderPassState(info->renderPass);
            if (render_pass) {
                if (info->subpass >= render_pass->createInfo.subpassCount) {
                    skip |= LogError(commandBuffer,
"VUID-VkCommandBufferBeginInfo-flags-00054", "vkBeginCommandBuffer(): Secondary %s must have a subpass index (%d) that is " "less than the number of subpasses (%d).", report_data->FormatHandle(commandBuffer).c_str(), info->subpass, render_pass->createInfo.subpassCount); } } } } if (CB_RECORDING == cb_state->state) { skip |= LogError(commandBuffer, "VUID-vkBeginCommandBuffer-commandBuffer-00049", "vkBeginCommandBuffer(): Cannot call Begin on %s in the RECORDING state. Must first call " "vkEndCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } else if (CB_RECORDED == cb_state->state || CB_INVALID_COMPLETE == cb_state->state) { VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool.get(); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkBeginCommandBuffer-commandBuffer-00050", "Call to vkBeginCommandBuffer() on %s attempts to implicitly reset cmdBuffer created from " "%s that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupCommandBufferBeginInfo>(pBeginInfo->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00106"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, commandBuffer, "VUID-VkDeviceGroupCommandBufferBeginInfo-deviceMask-00107"); } return skip; } bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; bool skip = false; if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == cb_state->createInfo.level) || !(cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // This needs spec clarification to update valid usage, see comments in PR: // https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/165 skip |= InsideRenderPass(cb_state, "vkEndCommandBuffer()", "VUID-vkEndCommandBuffer-commandBuffer-00060"); } if (cb_state->state == CB_INVALID_COMPLETE || cb_state->state == CB_INVALID_INCOMPLETE) { skip |= ReportInvalidCommandBuffer(cb_state, "vkEndCommandBuffer()"); } else if (CB_RECORDING != cb_state->state) { skip |= LogError( commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00059", "vkEndCommandBuffer(): Cannot call End on %s when not in the RECORDING state. 
Must first call vkBeginCommandBuffer().", report_data->FormatHandle(commandBuffer).c_str()); } for (const auto &query : cb_state->activeQueries) { skip |= LogError(commandBuffer, "VUID-vkEndCommandBuffer-commandBuffer-00061", "vkEndCommandBuffer(): Ending command buffer with in progress query: %s, query %d.", report_data->FormatHandle(query.pool).c_str(), query.query); } return skip; } bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); if (!cb_state) return false; VkCommandPool cmd_pool = cb_state->createInfo.commandPool; const auto *pool = cb_state->command_pool.get(); if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pool->createFlags)) { LogObjectList objlist(commandBuffer); objlist.add(cmd_pool); skip |= LogError(objlist, "VUID-vkResetCommandBuffer-commandBuffer-00046", "vkResetCommandBuffer(): Attempt to reset %s created from %s that does NOT have the " "VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set.", report_data->FormatHandle(commandBuffer).c_str(), report_data->FormatHandle(cmd_pool).c_str()); } skip |= CheckCommandBufferInFlight(cb_state, "reset", "VUID-vkResetCommandBuffer-commandBuffer-00045"); return skip; } static const char *GetPipelineTypeName(VkPipelineBindPoint pipelineBindPoint) { switch (pipelineBindPoint) { case VK_PIPELINE_BIND_POINT_GRAPHICS: return "graphics"; case VK_PIPELINE_BIND_POINT_COMPUTE: return "compute"; case VK_PIPELINE_BIND_POINT_RAY_TRACING_NV: return "ray-tracing"; default: return "unknown"; } } bool CoreChecks::ValidateGraphicsPipelineBindPoint(const CMD_BUFFER_STATE *cb_state, const PIPELINE_STATE *pipeline_state) const { bool skip = false; const FRAMEBUFFER_STATE *fb_state = cb_state->activeFramebuffer.get(); if (fb_state) { auto subpass_desc = &pipeline_state->rp_state->createInfo.pSubpasses[pipeline_state->graphicsPipelineCI.subpass]; for (size_t i = 0; i < pipeline_state->attachments.size() && i < subpass_desc->colorAttachmentCount; i++) { const auto attachment = subpass_desc->pColorAttachments[i].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; const auto *imageview_state = cb_state->GetActiveAttachmentImageViewState(attachment); if (!imageview_state) continue; const IMAGE_STATE *image_state = GetImageState(imageview_state->create_info.image); if (!image_state) continue; const VkFormat format = pipeline_state->rp_state->createInfo.pAttachments[attachment].format; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(format); if (pipeline_state->graphicsPipelineCI.pRasterizationState && !pipeline_state->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable && pipeline_state->attachments[i].blendEnable && !(format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) { skip |= LogError(device, "VUID-VkGraphicsPipelineCreateInfo-blendEnable-04717", "vkCreateGraphicsPipelines(): pipeline.pColorBlendState.pAttachments[" PRINTF_SIZE_T_SPECIFIER "].blendEnable is VK_TRUE but format %s associated with this attached image (%s) does " "not support VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT.", i, report_data->FormatHandle(image_state->image()).c_str(), string_VkFormat(format)); } } } if (cb_state->inheritedViewportDepths.size() != 0) { bool dyn_viewport = IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT) || IsDynamic(pipeline_state, VK_DYNAMIC_STATE_VIEWPORT); bool dyn_scissor = IsDynamic(pipeline_state, 
VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT) || IsDynamic(pipeline_state, VK_DYNAMIC_STATE_SCISSOR); if (!dyn_viewport || !dyn_scissor) { skip |= LogError(device, "VUID-vkCmdBindPipeline-commandBuffer-04808", "Graphics pipeline incompatible with viewport/scissor inheritance."); } } return skip; } bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()"); static const std::map<VkPipelineBindPoint, std::string> bindpoint_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdBindPipeline-pipelineBindPoint-00777"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdBindPipeline-pipelineBindPoint-00778"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdBindPipeline-pipelineBindPoint-02391")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, "vkCmdBindPipeline()", bindpoint_errors); const auto *pipeline_state = GetPipelineState(pipeline); assert(pipeline_state); const auto &pipeline_state_bind_point = pipeline_state->getPipelineType(); if (pipelineBindPoint != pipeline_state_bind_point) { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00779", "Cannot bind a pipeline of type %s to the graphics pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_COMPUTE) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-00780", "Cannot bind a pipeline of type %s to the compute pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } else if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindPipeline-pipelineBindPoint-02392", "Cannot bind a pipeline of type %s to the ray-tracing pipeline bind point", GetPipelineTypeName(pipeline_state_bind_point)); } } else { if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { skip |= ValidateGraphicsPipelineBindPoint(cb_state, pipeline_state); if (cb_state->activeRenderPass && phys_dev_ext_props.provoking_vertex_props.provokingVertexModePerPipeline == VK_FALSE) { const auto lvl_bind_point = ConvertToLvlBindPoint(pipelineBindPoint); const auto &last_bound_it = cb_state->lastBound[lvl_bind_point]; if (last_bound_it.pipeline_state) { auto last_bound_provoking_vertex_state_ci = LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>( last_bound_it.pipeline_state->graphicsPipelineCI.pRasterizationState->pNext); auto current_provoking_vertex_state_ci = LvlFindInChain<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>( pipeline_state->graphicsPipelineCI.pRasterizationState->pNext); if (last_bound_provoking_vertex_state_ci && !current_provoking_vertex_state_ci) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", "Previous %s's provokingVertexMode is %s, but %s doesn't chain " "VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.", report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(), string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode), report_data->FormatHandle(pipeline).c_str()); } else if (!last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci) { 
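// Here the previously bound pipeline relied on the implicit default provoking vertex mode (no
// VkPipelineRasterizationProvokingVertexStateCreateInfoEXT chained), while the incoming pipeline
// specifies a mode explicitly; with provokingVertexModePerPipeline == VK_FALSE the mode may not
// change between pipelines bound within the same render pass instance.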
skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", " %s's provokingVertexMode is %s, but previous %s doesn't chain " "VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.", report_data->FormatHandle(pipeline).c_str(), string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode), report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str()); } else if (last_bound_provoking_vertex_state_ci && current_provoking_vertex_state_ci && last_bound_provoking_vertex_state_ci->provokingVertexMode != current_provoking_vertex_state_ci->provokingVertexMode) { skip |= LogError(pipeline, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881", "%s's provokingVertexMode is %s, but previous %s's provokingVertexMode is %s.", report_data->FormatHandle(pipeline).c_str(), string_VkProvokingVertexModeEXT(current_provoking_vertex_state_ci->provokingVertexMode), report_data->FormatHandle(last_bound_it.pipeline_state->pipeline()).c_str(), string_VkProvokingVertexModeEXT(last_bound_provoking_vertex_state_ci->provokingVertexMode)); } } } } } return skip; } bool CoreChecks::ForbidInheritedViewportScissor(VkCommandBuffer commandBuffer, const CMD_BUFFER_STATE *cb_state, const char* vuid, const char *cmdName) const { bool skip = false; if (cb_state->inheritedViewportDepths.size() != 0) { skip |= LogError( commandBuffer, vuid, "%s: commandBuffer must not have VkCommandBufferInheritanceViewportScissorInfoNV::viewportScissor2D enabled.", cmdName); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETVIEWPORT, "vkCmdSetViewport()"); skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewport-commandBuffer-04821", "vkCmdSetViewport"); return skip; } bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETSCISSOR, "vkCmdSetScissor()"); skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissor-viewportScissor2D-04789", "vkCmdSetScissor"); return skip; } bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETEXCLUSIVESCISSORNV, "vkCmdSetExclusiveScissorNV()"); if (!enabled_features.exclusive_scissor.exclusiveScissor) { skip |= LogError(commandBuffer, "VUID-vkCmdSetExclusiveScissorNV-None-02031", "vkCmdSetExclusiveScissorNV: The exclusiveScissor feature is disabled."); } return skip; } bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BINDSHADINGRATEIMAGENV, "vkCmdBindShadingRateImageNV()"); if (!enabled_features.shading_rate_image.shadingRateImage) { skip |= LogError(commandBuffer, 
"VUID-vkCmdBindShadingRateImageNV-None-02058", "vkCmdBindShadingRateImageNV: The shadingRateImage feature is disabled."); } if (imageView != VK_NULL_HANDLE) { const auto view_state = GetImageViewState(imageView); auto &ivci = view_state->create_info; if (!view_state || (ivci.viewType != VK_IMAGE_VIEW_TYPE_2D && ivci.viewType != VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02059", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must be a valid " "VkImageView handle of type VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY."); } if (view_state && ivci.format != VK_FORMAT_R8_UINT) { skip |= LogError( imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02060", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, it must have a format of VK_FORMAT_R8_UINT."); } const VkImageCreateInfo *ici = view_state ? &GetImageState(view_state->create_info.image)->createInfo : nullptr; if (ici && !(ici->usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV)) { skip |= LogError(imageView, "VUID-vkCmdBindShadingRateImageNV-imageView-02061", "vkCmdBindShadingRateImageNV: If imageView is not VK_NULL_HANDLE, the image must have been " "created with VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV set."); } if (view_state) { const auto image_state = GetImageState(view_state->create_info.image); bool hit_error = false; // XXX TODO: While the VUID says "each subresource", only the base mip level is // actually used. Since we don't have an existing convenience function to iterate // over all mip levels, just don't bother with non-base levels. const VkImageSubresourceRange &range = view_state->normalized_subresource_range; VkImageSubresourceLayers subresource = {range.aspectMask, range.baseMipLevel, range.baseArrayLayer, range.layerCount}; if (image_state) { skip |= VerifyImageLayout(cb_state, image_state, subresource, imageLayout, VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV, "vkCmdCopyImage()", "VUID-vkCmdBindShadingRateImageNV-imageLayout-02063", "VUID-vkCmdBindShadingRateImageNV-imageView-02062", &hit_error); } } } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTSHADINGRATEPALETTENV, "vkCmdSetViewportShadingRatePaletteNV()"); if (!enabled_features.shading_rate_image.shadingRateImage) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportShadingRatePaletteNV-None-02064", "vkCmdSetViewportShadingRatePaletteNV: The shadingRateImage feature is disabled."); } for (uint32_t i = 0; i < viewportCount; ++i) { auto *palette = &pShadingRatePalettes[i]; if (palette->shadingRatePaletteEntryCount == 0 || palette->shadingRatePaletteEntryCount > phys_dev_ext_props.shading_rate_image_props.shadingRatePaletteSize) { skip |= LogError( commandBuffer, "VUID-VkShadingRatePaletteNV-shadingRatePaletteEntryCount-02071", "vkCmdSetViewportShadingRatePaletteNV: shadingRatePaletteEntryCount must be between 1 and shadingRatePaletteSize."); } } return skip; } bool CoreChecks::ValidateGeometryTrianglesNV(const VkGeometryTrianglesNV &triangles, const char *func_name) const { bool skip = false; const BUFFER_STATE *vb_state = GetBufferState(triangles.vertexData); if (vb_state != nullptr && vb_state->createInfo.size <= triangles.vertexOffset) { skip |= 
LogError(device, "VUID-VkGeometryTrianglesNV-vertexOffset-02428", "%s", func_name); } const BUFFER_STATE *ib_state = GetBufferState(triangles.indexData); if (ib_state != nullptr && ib_state->createInfo.size <= triangles.indexOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-indexOffset-02431", "%s", func_name); } const BUFFER_STATE *td_state = GetBufferState(triangles.transformData); if (td_state != nullptr && td_state->createInfo.size <= triangles.transformOffset) { skip |= LogError(device, "VUID-VkGeometryTrianglesNV-transformOffset-02437", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryAABBNV(const VkGeometryAABBNV &aabbs, const char *func_name) const { bool skip = false; const BUFFER_STATE *aabb_state = GetBufferState(aabbs.aabbData); if (aabb_state != nullptr && aabb_state->createInfo.size > 0 && aabb_state->createInfo.size <= aabbs.offset) { skip |= LogError(device, "VUID-VkGeometryAABBNV-offset-02439", "%s", func_name); } return skip; } bool CoreChecks::ValidateGeometryNV(const VkGeometryNV &geometry, const char *func_name) const { bool skip = false; if (geometry.geometryType == VK_GEOMETRY_TYPE_TRIANGLES_NV) { skip = ValidateGeometryTrianglesNV(geometry.geometry.triangles, func_name); } else if (geometry.geometryType == VK_GEOMETRY_TYPE_AABBS_NV) { skip = ValidateGeometryAABBNV(geometry.geometry.aabbs, func_name); } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureNV(VkDevice device, const VkAccelerationStructureCreateInfoNV *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureNV *pAccelerationStructure) const { bool skip = false; if (pCreateInfo != nullptr && pCreateInfo->info.type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pCreateInfo->info.geometryCount; i++) { skip |= ValidateGeometryNV(pCreateInfo->info.pGeometries[i], "vkCreateAccelerationStructureNV():"); } } return skip; } bool CoreChecks::PreCallValidateCreateAccelerationStructureKHR(VkDevice device, const VkAccelerationStructureCreateInfoKHR *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkAccelerationStructureKHR *pAccelerationStructure) const { bool skip = false; if (pCreateInfo) { const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03614", "VkAccelerationStructureCreateInfoKHR(): buffer must have been created with a usage value containing " "VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR."); } if (buffer_state->createInfo.flags & VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT) { skip |= LogError(device, "VUID-VkAccelerationStructureCreateInfoKHR-buffer-03615", "VkAccelerationStructureCreateInfoKHR(): buffer must not have been created with " "VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT."); } if (pCreateInfo->offset + pCreateInfo->size > buffer_state->createInfo.size) { skip |= LogError( device, "VUID-VkAccelerationStructureCreateInfoKHR-offset-03616", "VkAccelerationStructureCreateInfoKHR(): The sum of offset and size must be less than the size of buffer."); } } } return skip; } bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device, const VkBindAccelerationStructureMemoryInfoNV &info) const { bool skip = false; const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure); if (!as_state) { return skip; } if 
bool CoreChecks::ValidateBindAccelerationStructureMemory(VkDevice device,
                                                         const VkBindAccelerationStructureMemoryInfoNV &info) const {
    bool skip = false;

    const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(info.accelerationStructure);
    if (!as_state) {
        return skip;
    }

    if (!as_state->GetBoundMemory().empty()) {
        skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-accelerationStructure-03620",
                         "vkBindAccelerationStructureMemoryNV(): accelerationStructure must not already be backed by a memory "
                         "object.");
    }

    // Validate bound memory range information
    const auto mem_info = GetDevMemState(info.memory);
    if (mem_info) {
        skip |= ValidateInsertAccelerationStructureMemoryRange(info.accelerationStructure, mem_info, info.memoryOffset,
                                                               "vkBindAccelerationStructureMemoryNV()");
        skip |= ValidateMemoryTypes(mem_info, as_state->memory_requirements.memoryRequirements.memoryTypeBits,
                                    "vkBindAccelerationStructureMemoryNV()",
                                    "VUID-VkBindAccelerationStructureMemoryInfoNV-memory-03622");
    }

    // Validate memory requirements alignment
    if (SafeModulo(info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment) != 0) {
        skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-memoryOffset-03623",
                         "vkBindAccelerationStructureMemoryNV(): memoryOffset 0x%" PRIxLEAST64
                         " must be an integer multiple of the alignment 0x%" PRIxLEAST64
                         " member of the VkMemoryRequirements structure returned from "
                         "a call to vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
                         "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV",
                         info.memoryOffset, as_state->memory_requirements.memoryRequirements.alignment);
    }

    if (mem_info) {
        // Validate memory requirements size
        if (as_state->memory_requirements.memoryRequirements.size > (mem_info->alloc_info.allocationSize - info.memoryOffset)) {
            skip |= LogError(info.accelerationStructure, "VUID-VkBindAccelerationStructureMemoryInfoNV-size-03624",
                             "vkBindAccelerationStructureMemoryNV(): The size 0x%" PRIxLEAST64
                             " member of the VkMemoryRequirements structure returned from a call to "
                             "vkGetAccelerationStructureMemoryRequirementsNV with accelerationStructure and type of "
                             "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV must be less than or equal to the size "
                             "of memory minus memoryOffset 0x%" PRIxLEAST64 ".",
                             as_state->memory_requirements.memoryRequirements.size,
                             mem_info->alloc_info.allocationSize - info.memoryOffset);
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateBindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
                                                                  const VkBindAccelerationStructureMemoryInfoNV *pBindInfos) const {
    bool skip = false;
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        skip |= ValidateBindAccelerationStructureMemory(device, pBindInfos[i]);
    }
    return skip;
}
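// The handle query below only requires that device memory has been bound to the acceleration structure.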
bool CoreChecks::PreCallValidateGetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
                                                                 size_t dataSize, void *pData) const {
    bool skip = false;

    const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
    if (as_state != nullptr) {
        // TODO: update the fake VUID below once the real one is generated.
        skip = ValidateMemoryIsBoundToAccelerationStructure(
            as_state, "vkGetAccelerationStructureHandleNV",
            "UNASSIGNED-vkGetAccelerationStructureHandleNV-accelerationStructure-XXXX");
    }

    return skip;
}

bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresKHR(
    VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
    const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
    bool skip = false;
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESKHR, "vkCmdBuildAccelerationStructuresKHR()");
    if (pInfos != NULL) {
        for (uint32_t info_index = 0; info_index < infoCount; ++info_index) {
            const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state =
                GetAccelerationStructureStateKHR(pInfos[info_index].srcAccelerationStructure);
            const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state =
                GetAccelerationStructureStateKHR(pInfos[info_index].dstAccelerationStructure);
            if (pInfos[info_index].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
                if (src_as_state == nullptr || !src_as_state->built ||
                    !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03667",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                     "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must "
                                     "have been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                     "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
                }
                if (src_as_state != nullptr) {
                    // Guard the comparisons below: src_as_state is null for an invalid handle, in which case
                    // the 03667 error above is the only message that can be produced. The original code
                    // dereferenced src_as_state here without this check.
                    if (pInfos[info_index].geometryCount != src_as_state->build_info_khr.geometryCount) {
                        skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03758",
                                         "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                         "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
                                         " its geometryCount member must have the same value which was specified when "
                                         "srcAccelerationStructure was last built.");
                    }
                    if (pInfos[info_index].flags != src_as_state->build_info_khr.flags) {
                        skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03759",
                                         "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                         " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same "
                                         "value which was specified when srcAccelerationStructure was last built.");
                    }
                    if (pInfos[info_index].type != src_as_state->build_info_khr.type) {
                        skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03760",
                                         "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                         " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same "
                                         "value which was specified when srcAccelerationStructure was last built.");
                    }
                }
            }
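            // The type recorded at create time must match the type being built; GENERIC acts as a
            // create-time wildcard that is compatible with either build type.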
            if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
                if (!dst_as_state ||
                    (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                     dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03700",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                     "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must "
                                     "have been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                     "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
                }
            }
            if (pInfos[info_index].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) {
                if (!dst_as_state ||
                    (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR &&
                     dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                    skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresKHR-pInfos-03699",
                                     "vkCmdBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                     "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have "
                                     "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                     "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
                }
            }
            skip |= ValidateAccelerationBuffers(info_index, pInfos[info_index], "vkCmdBuildAccelerationStructuresKHR");
        }
    }
    return skip;
}

bool CoreChecks::ValidateAccelerationBuffers(uint32_t info_index, const VkAccelerationStructureBuildGeometryInfoKHR &info,
                                             const char *func_name) const {
    bool skip = false;
    const auto geometry_count = info.geometryCount;
    const auto *p_geometries = info.pGeometries;
    const auto *const *const pp_geometries = info.ppGeometries;

    auto buffer_check = [this, info_index, func_name](uint32_t gi, const VkDeviceOrHostAddressConstKHR address,
                                                      const char *field) -> bool {
        const auto itr = buffer_address_map_.find(address.deviceAddress);
        if (itr != buffer_address_map_.cend() &&
            !(itr->second->createInfo.usage & VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR)) {
            LogObjectList objlist(device);
            objlist.add(itr->second->Handle());
            return LogError(objlist, "VUID-vkCmdBuildAccelerationStructuresKHR-geometry-03673",
                            "%s(): The buffer associated with pInfos[%" PRIu32 "].pGeometries[%" PRIu32
                            "].%s was not created with VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR.",
                            func_name, info_index, gi, field);
        }
        return false;
    };

    // Parameter validation has already checked VUID-VkAccelerationStructureBuildGeometryInfoKHR-pGeometries-03788
    // !(pGeometries && ppGeometries)
    std::function<const VkAccelerationStructureGeometryKHR &(uint32_t)> geom_accessor;
    if (p_geometries) {
        geom_accessor = [p_geometries](uint32_t i) -> const VkAccelerationStructureGeometryKHR & { return p_geometries[i]; };
    } else if (pp_geometries) {
        geom_accessor = [pp_geometries](uint32_t i) -> const VkAccelerationStructureGeometryKHR & {
            // pp_geometries[i] is assumed to be a valid pointer
            return *pp_geometries[i];
        };
    }

    if (geom_accessor) {
        for (uint32_t geom_index = 0; geom_index < geometry_count; ++geom_index) {
            const auto &geom_data = geom_accessor(geom_index);
            switch (geom_data.geometryType) {
                case VK_GEOMETRY_TYPE_TRIANGLES_KHR:  // == VK_GEOMETRY_TYPE_TRIANGLES_NV
                    skip |= buffer_check(geom_index, geom_data.geometry.triangles.vertexData, "geometry.triangles.vertexData");
                    skip |= buffer_check(geom_index, geom_data.geometry.triangles.indexData, "geometry.triangles.indexData");
                    skip |= buffer_check(geom_index, geom_data.geometry.triangles.transformData, "geometry.triangles.transformData");
                    break;
                case VK_GEOMETRY_TYPE_INSTANCES_KHR:
                    skip |= buffer_check(geom_index, geom_data.geometry.instances.data, "geometry.instances.data");
                    break;
                case VK_GEOMETRY_TYPE_AABBS_KHR:  // == VK_GEOMETRY_TYPE_AABBS_NV
                    skip |= buffer_check(geom_index, geom_data.geometry.aabbs.data, "geometry.aabbs.data");
                    break;
                default:
                    // no-op
                    break;
            }
        }
    }
    return skip;
}
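// vkBuildAccelerationStructuresKHR is the host-side counterpart of vkCmdBuildAccelerationStructuresKHR;
// the per-pInfos update-mode and type checks below are intentionally the same as the command-buffer path.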
bool CoreChecks::PreCallValidateBuildAccelerationStructuresKHR(
    VkDevice device, VkDeferredOperationKHR deferredOperation, uint32_t infoCount,
    const VkAccelerationStructureBuildGeometryInfoKHR *pInfos,
    const VkAccelerationStructureBuildRangeInfoKHR *const *ppBuildRangeInfos) const {
    bool skip = false;
    for (uint32_t i = 0; i < infoCount; ++i) {
        const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure);
        const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure);
        if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) {
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03667",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                 "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have "
                                 "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in "
                                 "VkAccelerationStructureBuildGeometryInfoKHR::flags.");
            }
            if (src_as_state != nullptr) {
                // Guard the comparisons below: src_as_state is null for an invalid handle, and the
                // 03667 error above is then the only message that can be produced.
                if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03758",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is "
                                     "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,"
                                     " its geometryCount member must have the same value which was specified when "
                                     "srcAccelerationStructure was last built.");
                }
                if (pInfos[i].flags != src_as_state->build_info_khr.flags) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03759",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value "
                                     "which was specified when srcAccelerationStructure was last built.");
                }
                if (pInfos[i].type != src_as_state->build_info_khr.type) {
                    skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03760",
                                     "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its mode member is"
                                     " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value "
                                     "which was specified when srcAccelerationStructure was last built.");
                }
            }
        }
        if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR) {
            if (!dst_as_state ||
                (dst_as_state && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR &&
                 dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) {
                skip |= LogError(device, "VUID-vkBuildAccelerationStructuresKHR-pInfos-03700",
                                 "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have "
                                 "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either "
                                 "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR.");
            }
        }
"VUID-vkBuildAccelerationStructuresKHR-pInfos-03699", "vkBuildAccelerationStructuresKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV *pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURENV, "vkCmdBuildAccelerationStructureNV()"); if (pInfo != nullptr && pInfo->type == VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV) { for (uint32_t i = 0; i < pInfo->geometryCount; i++) { skip |= ValidateGeometryNV(pInfo->pGeometries[i], "vkCmdBuildAccelerationStructureNV():"); } } if (pInfo != nullptr && pInfo->geometryCount > phys_dev_ext_props.ray_tracing_propsNV.maxGeometryCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-geometryCount-02241", "vkCmdBuildAccelerationStructureNV(): geometryCount [%d] must be less than or equal to " "VkPhysicalDeviceRayTracingPropertiesNV::maxGeometryCount.", pInfo->geometryCount); } const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst); const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src); const BUFFER_STATE *scratch_buffer_state = GetBufferState(scratch); if (dst_as_state != nullptr && pInfo != nullptr) { if (dst_as_state->create_infoNV.info.type != pInfo->type) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::type" "[%s] must be identical to build info VkAccelerationStructureInfoNV::type [%s].", string_VkAccelerationStructureTypeNV(dst_as_state->create_infoNV.info.type), string_VkAccelerationStructureTypeNV(pInfo->type)); } if (dst_as_state->create_infoNV.info.flags != pInfo->flags) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::flags" "[0x%X] must be identical to build info VkAccelerationStructureInfoNV::flags [0x%X].", dst_as_state->create_infoNV.info.flags, pInfo->flags); } if (dst_as_state->create_infoNV.info.instanceCount < pInfo->instanceCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::instanceCount " "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::instanceCount [%d].", dst_as_state->create_infoNV.info.instanceCount, pInfo->instanceCount); } if (dst_as_state->create_infoNV.info.geometryCount < pInfo->geometryCount) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488", "vkCmdBuildAccelerationStructureNV(): create info VkAccelerationStructureInfoNV::geometryCount" "[%d] must be greater than or equal to build info VkAccelerationStructureInfoNV::geometryCount [%d].", dst_as_state->create_infoNV.info.geometryCount, 
        } else {
            for (uint32_t i = 0; i < pInfo->geometryCount; i++) {
                const VkGeometryDataNV &create_geometry_data = dst_as_state->create_infoNV.info.pGeometries[i].geometry;
                const VkGeometryDataNV &build_geometry_data = pInfo->pGeometries[i].geometry;
                if (create_geometry_data.triangles.vertexCount < build_geometry_data.triangles.vertexCount) {
                    skip |= LogError(
                        commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                        "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.vertexCount [%d] "
                        "must be greater than or equal to build info pGeometries[%d].geometry.triangles.vertexCount [%d].",
                        i, create_geometry_data.triangles.vertexCount, i, build_geometry_data.triangles.vertexCount);
                    break;
                }
                if (create_geometry_data.triangles.indexCount < build_geometry_data.triangles.indexCount) {
                    skip |= LogError(
                        commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                        "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.triangles.indexCount [%d] "
                        "must be greater than or equal to build info pGeometries[%d].geometry.triangles.indexCount [%d].",
                        i, create_geometry_data.triangles.indexCount, i, build_geometry_data.triangles.indexCount);
                    break;
                }
                if (create_geometry_data.aabbs.numAABBs < build_geometry_data.aabbs.numAABBs) {
                    skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-dst-02488",
                                     "vkCmdBuildAccelerationStructureNV(): create info pGeometries[%d].geometry.aabbs.numAABBs [%d] "
                                     "must be greater than or equal to build info pGeometries[%d].geometry.aabbs.numAABBs [%d].",
                                     i, create_geometry_data.aabbs.numAABBs, i, build_geometry_data.aabbs.numAABBs);
                    break;
                }
            }
        }
    }

    if (dst_as_state != nullptr) {
        skip |= ValidateMemoryIsBoundToAccelerationStructure(
            dst_as_state, "vkCmdBuildAccelerationStructureNV()",
            "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV");
    }

    if (update == VK_TRUE) {
        if (src == VK_NULL_HANDLE) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02489",
                             "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must not be VK_NULL_HANDLE.");
        } else {
            if (src_as_state == nullptr || !src_as_state->built ||
                !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV)) {
                skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02490",
                                 "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, src must have been built before "
                                 "with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV set in "
                                 "VkAccelerationStructureInfoNV::flags.");
            }
        }
        if (dst_as_state != nullptr && !dst_as_state->update_scratch_memory_requirements_checked) {
            skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoUpdateMemReqQuery,
                               "vkCmdBuildAccelerationStructureNV(): Updating %s but vkGetAccelerationStructureMemoryRequirementsNV() "
                               "has not been called for update scratch memory.",
                               report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str());
            // Use requirements fetched at create time
        }
        if (scratch_buffer_state != nullptr && dst_as_state != nullptr &&
            dst_as_state->update_scratch_memory_requirements.memoryRequirements.size >
                (scratch_buffer_state->createInfo.size - scratchOffset)) {
            skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02492",
                             "vkCmdBuildAccelerationStructureNV(): If update is VK_TRUE, The size member of the "
                             "VkMemoryRequirements structure returned from a call to "
                             "vkGetAccelerationStructureMemoryRequirementsNV with "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and "
                             "VkAccelerationStructureMemoryRequirementsInfoNV::type set to "
                             "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than "
                             "or equal to the size of scratch minus scratchOffset");
        }
"VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " "VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } else { if (dst_as_state != nullptr && !dst_as_state->build_scratch_memory_requirements_checked) { skip |= LogWarning(dst, kVUID_Core_CmdBuildAccelNV_NoScratchMemReqQuery, "vkCmdBuildAccelerationStructureNV(): Assigning scratch buffer to %s but " "vkGetAccelerationStructureMemoryRequirementsNV() has not been called for scratch memory.", report_data->FormatHandle(dst_as_state->acceleration_structure()).c_str()); // Use requirements fetched at create time } if (scratch_buffer_state != nullptr && dst_as_state != nullptr && dst_as_state->build_scratch_memory_requirements.memoryRequirements.size > (scratch_buffer_state->createInfo.size - scratchOffset)) { skip |= LogError(commandBuffer, "VUID-vkCmdBuildAccelerationStructureNV-update-02491", "vkCmdBuildAccelerationStructureNV(): If update is VK_FALSE, The size member of the " "VkMemoryRequirements structure returned from a call to " "vkGetAccelerationStructureMemoryRequirementsNV with " "VkAccelerationStructureMemoryRequirementsInfoNV::accelerationStructure set to dst and " "VkAccelerationStructureMemoryRequirementsInfoNV::type set to " "VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV must be less than " "or equal to the size of scratch minus scratchOffset"); } } if (instanceData != VK_NULL_HANDLE) { const auto buffer_state = GetBufferState(instanceData); if (buffer_state != nullptr) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-instanceData-02782", "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } } if (scratch_buffer_state != nullptr) { skip |= ValidateBufferUsageFlags(scratch_buffer_state, VK_BUFFER_USAGE_RAY_TRACING_BIT_NV, true, "VUID-VkAccelerationStructureInfoNV-scratch-02781", "vkCmdBuildAccelerationStructureNV()", "VK_BUFFER_USAGE_RAY_TRACING_BIT_NV"); } return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURENV, "vkCmdCopyAccelerationStructureNV()"); const ACCELERATION_STRUCTURE_STATE *dst_as_state = GetAccelerationStructureStateNV(dst); const ACCELERATION_STRUCTURE_STATE *src_as_state = GetAccelerationStructureStateNV(src); if (dst_as_state != nullptr) { skip |= ValidateMemoryIsBoundToAccelerationStructure( dst_as_state, "vkCmdBuildAccelerationStructureNV()", "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkAccelerationStructureNV"); } if (mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV) { if (src_as_state != nullptr && (!src_as_state->built || !(src_as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV))) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-src-03411", "vkCmdCopyAccelerationStructureNV(): src must have been built with " "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV if mode is " "VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV."); } } if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || 
    if (!(mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV || mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR)) {
        skip |= LogError(commandBuffer, "VUID-vkCmdCopyAccelerationStructureNV-mode-03410",
                         "vkCmdCopyAccelerationStructureNV(): mode must be VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR "
                         "or VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateDestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
                                                               const VkAllocationCallbacks *pAllocator) const {
    const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(accelerationStructure);
    bool skip = false;
    if (as_state) {
        skip |= ValidateObjectNotInUse(as_state, "vkDestroyAccelerationStructureNV",
                                       "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
    }
    return skip;
}

bool CoreChecks::PreCallValidateDestroyAccelerationStructureKHR(VkDevice device, VkAccelerationStructureKHR accelerationStructure,
                                                                const VkAllocationCallbacks *pAllocator) const {
    const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(accelerationStructure);
    bool skip = false;
    if (as_state) {
        skip |= ValidateObjectNotInUse(as_state, "vkDestroyAccelerationStructureKHR",
                                       "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02442");
        // Keep the allocator check inside this guard; the original ran it unconditionally and could
        // dereference a null as_state for an unknown handle.
        if (pAllocator && !as_state->allocator) {
            skip |= LogError(device, "VUID-vkDestroyAccelerationStructureKHR-accelerationStructure-02444",
                             "vkDestroyAccelerationStructureKHR(): If no VkAllocationCallbacks were provided when "
                             "accelerationStructure was created, pAllocator must be NULL.");
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
                                                         uint32_t viewportCount,
                                                         const VkViewportWScalingNV *pViewportWScalings) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWSCALINGNV, "vkCmdSetViewportWScalingNV()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETLINEWIDTH, "vkCmdSetLineWidth()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetLineStippleEXT(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor,
                                                     uint16_t lineStipplePattern) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETLINESTIPPLEEXT, "vkCmdSetLineStippleEXT()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor,
                                                float depthBiasClamp, float depthBiasSlopeFactor) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIAS, "vkCmdSetDepthBias()");
    if ((depthBiasClamp != 0.0) && (!enabled_features.core.depthBiasClamp)) {
        skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBias-depthBiasClamp-00790",
                         "vkCmdSetDepthBias(): the depthBiasClamp device feature is disabled: the depthBiasClamp parameter must "
                         "be set to 0.0.");
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETBLENDCONSTANTS, "vkCmdSetBlendConstants()");
    return skip;
}
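// Depth bounds are only restricted to [0.0, 1.0] when VK_EXT_depth_range_unrestricted is not enabled.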
bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds,
                                                  float maxDepthBounds) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDS, "vkCmdSetDepthBounds()");

    // The extension was not created with a feature bit which prevents displaying the 2 variations of the VUIDs
    if (!device_extensions.vk_ext_depth_range_unrestricted) {
        if (!(minDepthBounds >= 0.0) || !(minDepthBounds <= 1.0)) {
            // Also VUID-vkCmdSetDepthBounds-minDepthBounds-00600
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and minDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             minDepthBounds);
        }

        if (!(maxDepthBounds >= 0.0) || !(maxDepthBounds <= 1.0)) {
            // Also VUID-vkCmdSetDepthBounds-maxDepthBounds-00601
            skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509",
                             "vkCmdSetDepthBounds(): VK_EXT_depth_range_unrestricted extension is not enabled and maxDepthBounds "
                             "(=%f) is not within the [0.0, 1.0] range.",
                             maxDepthBounds);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                         uint32_t compareMask) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILCOMPAREMASK, "vkCmdSetStencilCompareMask()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t writeMask) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILWRITEMASK, "vkCmdSetStencilWriteMask()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
                                                       uint32_t reference) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_SETSTENCILREFERENCE, "vkCmdSetStencilReference()");
    return skip;
}

bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                      VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
                                                      const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                                                      const uint32_t *pDynamicOffsets) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    skip |= ValidateCmd(cb_state, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");

    // Track total count of dynamic descriptor types to make sure we have an offset for each one
    uint32_t total_dynamic_descriptors = 0;
    string error_string = "";

    const auto *pipeline_layout = GetPipelineLayout(layout);
    for (uint32_t set_idx = 0; set_idx < setCount; set_idx++) {
        const cvdescriptorset::DescriptorSet *descriptor_set = GetSetNode(pDescriptorSets[set_idx]);
        if (descriptor_set) {
            // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
            if (!VerifySetLayoutCompatibility(report_data, descriptor_set, pipeline_layout, set_idx + firstSet, error_string)) {
                skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-00358",
                                 "vkCmdBindDescriptorSets(): descriptorSet #%u being bound is not compatible with overlapping "
                                 "descriptorSetLayout at index %u of "
                                 "%s due to: %s.",
                                 set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str());
            }
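            // Every dynamic descriptor in the bound sets consumes one pDynamicOffsets entry, in binding order.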
"%s due to: %s.", set_idx, set_idx + firstSet, report_data->FormatHandle(layout).c_str(), error_string.c_str()); } auto set_dynamic_descriptor_count = descriptor_set->GetDynamicDescriptorCount(); if (set_dynamic_descriptor_count) { // First make sure we won't overstep bounds of pDynamicOffsets array if ((total_dynamic_descriptors + set_dynamic_descriptor_count) > dynamicOffsetCount) { // Test/report this here, such that we don't run past the end of pDynamicOffsets in the else clause skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "vkCmdBindDescriptorSets(): descriptorSet #%u (%s) requires %u dynamicOffsets, but only %u " "dynamicOffsets are left in " "pDynamicOffsets array. There must be one dynamic offset for each dynamic descriptor being bound.", set_idx, report_data->FormatHandle(pDescriptorSets[set_idx]).c_str(), descriptor_set->GetDynamicDescriptorCount(), (dynamicOffsetCount - total_dynamic_descriptors)); // Set the number found to the maximum to prevent duplicate messages, or subsquent descriptor sets from // testing against the "short tail" we're skipping below. total_dynamic_descriptors = dynamicOffsetCount; } else { // Validate dynamic offsets and Dynamic Offset Minimums // offset for all sets (pDynamicOffsets) uint32_t cur_dyn_offset = total_dynamic_descriptors; // offset into this descriptor set uint32_t set_dyn_offset = 0; const auto &dsl = descriptor_set->GetLayout(); const auto binding_count = dsl->GetBindingCount(); const auto &limits = phys_dev_props.limits; for (uint32_t i = 0; i < binding_count; i++) { const auto *binding = dsl->GetDescriptorSetLayoutBindingPtrFromIndex(i); // skip checking binding if not needed if (cvdescriptorset::IsDynamicDescriptor(binding->descriptorType) == false) { continue; } // If a descriptor set has only binding 0 and 2 the binding_index will be 0 and 2 const uint32_t binding_index = binding->binding; const uint32_t descriptorCount = binding->descriptorCount; // Need to loop through each descriptor count inside the binding // if descriptorCount is zero the binding with a dynamic descriptor type does not count for (uint32_t j = 0; j < descriptorCount; j++) { const uint32_t offset = pDynamicOffsets[cur_dyn_offset]; if (offset == 0) { // offset of zero is equivalent of not having the dynamic offset cur_dyn_offset++; set_dyn_offset++; continue; } // Validate alignment with limit if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (SafeModulo(offset, limits.minUniformBufferOffsetAlignment) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01971", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of " "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ".", cur_dyn_offset, offset, limits.minUniformBufferOffsetAlignment); } if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (SafeModulo(offset, limits.minStorageBufferOffsetAlignment) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdBindDescriptorSets-pDynamicOffsets-01972", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is %u, but must be a multiple of " "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ".", cur_dyn_offset, offset, limits.minStorageBufferOffsetAlignment); } auto *descriptor = descriptor_set->GetDescriptorFromDynamicOffsetIndex(set_dyn_offset); assert(descriptor != nullptr); // Currently only GeneralBuffer are dynamic and need to be checked if (descriptor->GetClass() == 
cvdescriptorset::DescriptorClass::GeneralBuffer) { const auto *buffer_descriptor = static_cast<const cvdescriptorset::BufferDescriptor *>(descriptor); const VkDeviceSize bound_range = buffer_descriptor->GetRange(); const VkDeviceSize bound_offset = buffer_descriptor->GetOffset(); const BUFFER_STATE *buffer_state = buffer_descriptor->GetBufferState(); assert(buffer_state != nullptr); // Validate offset didn't go over buffer if ((bound_range == VK_WHOLE_SIZE) && (offset > 0)) { LogObjectList objlist(commandBuffer); objlist.add(pDescriptorSets[set_idx]); objlist.add(buffer_state->buffer()); skip |= LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x, but must be zero since " "the buffer descriptor's range is VK_WHOLE_SIZE in descriptorSet #%u binding #%u " "descriptor[%u].", cur_dyn_offset, offset, set_idx, binding_index, j); } else if ((bound_range != VK_WHOLE_SIZE) && ((offset + bound_range + bound_offset) > buffer_state->createInfo.size)) { LogObjectList objlist(commandBuffer); objlist.add(pDescriptorSets[set_idx]); objlist.add(buffer_state->buffer()); skip |= LogError(objlist, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-01979", "vkCmdBindDescriptorSets(): pDynamicOffsets[%u] is 0x%x which when added to the " "buffer descriptor's range (0x%" PRIxLEAST64 ") is greater than the size of the buffer (0x%" PRIxLEAST64 ") in descriptorSet #%u binding #%u descriptor[%u].", cur_dyn_offset, offset, bound_range, buffer_state->createInfo.size, set_idx, binding_index, j); } } cur_dyn_offset++; set_dyn_offset++; } // descriptorCount loop } // bindingCount loop // Keep running total of dynamic descriptor count to verify at the end total_dynamic_descriptors += set_dynamic_descriptor_count; } } } else { skip |= LogError(pDescriptorSets[set_idx], "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter", "vkCmdBindDescriptorSets(): Attempt to bind %s that doesn't exist!", report_data->FormatHandle(pDescriptorSets[set_idx]).c_str()); } } // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound if (total_dynamic_descriptors != dynamicOffsetCount) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-dynamicOffsetCount-00359", "vkCmdBindDescriptorSets(): Attempting to bind %u descriptorSets with %u dynamic descriptors, but " "dynamicOffsetCount is %u. It should " "exactly match the number of dynamic descriptors.", setCount, total_dynamic_descriptors, dynamicOffsetCount); } // firstSet and descriptorSetCount sum must be less than setLayoutCount if ((firstSet + setCount) > static_cast<uint32_t>(pipeline_layout->set_layouts.size())) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBindDescriptorSets-firstSet-00360", "vkCmdBindDescriptorSets(): Sum of firstSet (%u) and descriptorSetCount (%u) is greater than " "VkPipelineLayoutCreateInfo::setLayoutCount " "(%zu) when pipeline layout was created", firstSet, setCount, pipeline_layout->set_layouts.size()); } return skip; } // Validates that the supplied bind point is supported for the command buffer (vis. the command pool) // Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint // TODO add vkCmdBindPipeline bind_point validation using this call. 
bool CoreChecks::ValidatePipelineBindPoint(const CMD_BUFFER_STATE *cb_state, VkPipelineBindPoint bind_point, const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) const { bool skip = false; auto pool = cb_state->command_pool.get(); if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool static const std::map<VkPipelineBindPoint, VkQueueFlags> flag_mask = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, static_cast<VkQueueFlags>(VK_QUEUE_COMPUTE_BIT)), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)), }; const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex]; if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) { const std::string &error = bind_errors.at(bind_point); LogObjectList objlist(cb_state->commandBuffer()); objlist.add(cb_state->createInfo.commandPool); skip |= LogError(objlist, error, "%s: %s was allocated from %s that does not support bindpoint %s.", func_name, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(cb_state->createInfo.commandPool).c_str(), string_VkPipelineBindPoint(bind_point)); } } return skip; } bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *func_name = "vkCmdPushDescriptorSetKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETKHR, func_name); static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363")}; skip |= ValidatePipelineBindPoint(cb_state, pipelineBindPoint, func_name, bind_errors); const auto layout_data = GetPipelineLayout(layout); // Validate the set index points to a push descriptor set and is in range if (layout_data) { const auto &set_layouts = layout_data->set_layouts; if (set < set_layouts.size()) { const auto &dsl = set_layouts[set]; if (dsl) { if (!dsl->IsPushDescriptor()) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } else { // Create an empty proxy in order to use the existing descriptor set update validation // TODO move the validation (like this) that doesn't need descriptor set state to the DSL object so we // don't have to do this. 
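                        // The proxy set carries only the layout, which is all that
                        // ValidatePushDescriptorsUpdate needs to check the writes.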
cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); skip |= ValidatePushDescriptorsUpdate(&proxy_ds, descriptorWriteCount, pDescriptorWrites, func_name); } } } else { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(set_layouts.size())); } } return skip; } bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) const { const auto buffer_state = GetBufferState(buffer); const auto cb_node = GetCBState(commandBuffer); assert(buffer_state); assert(cb_node); bool skip = ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_INDEX_BUFFER_BIT, true, "VUID-vkCmdBindIndexBuffer-buffer-00433", "vkCmdBindIndexBuffer()", "VK_BUFFER_USAGE_INDEX_BUFFER_BIT"); skip |= ValidateCmd(cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindIndexBuffer()", "VUID-vkCmdBindIndexBuffer-buffer-00434"); const auto offset_align = GetIndexAlignment(indexType); if (offset % offset_align) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00432", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.", offset, string_VkIndexType(indexType)); } if (offset >= buffer_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdBindIndexBuffer-offset-00431", "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", offset, buffer_state->requirements.size, report_data->FormatHandle(buffer_state->buffer()).c_str()); } return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS, "vkCmdBindVertexBuffers()"); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = GetBufferState(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers-pBuffers-00627", "vkCmdBindVertexBuffers()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers()", "VUID-vkCmdBindVertexBuffers-pBuffers-00628"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers-pOffsets-00626", "vkCmdBindVertexBuffers() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } } } return skip; } // Validate that an image's sampleCount matches the requirement for a specific API call bool CoreChecks::ValidateImageSampleCount(const IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count, const char *location, const std::string &msgCode) const { bool skip = false; if (image_state->createInfo.samples != sample_count) { skip = LogError(image_state->image(), msgCode, "%s for %s was created with a sample count of %s but must be %s.", location, report_data->FormatHandle(image_state->image()).c_str(), string_VkSampleCountFlagBits(image_state->createInfo.samples), string_VkSampleCountFlagBits(sample_count)); } return skip; } bool 
CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void *pData) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); const auto dst_buffer_state = GetBufferState(dstBuffer); assert(dst_buffer_state); bool skip = false; skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-dstBuffer-00035"); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdUpdateBuffer-dstBuffer-00034", "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmd(cb_state, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()"); skip |= ValidateProtectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01813"); skip |= ValidateUnprotectedBuffer(cb_state, dst_buffer_state, "vkCmdUpdateBuffer()", "VUID-vkCmdUpdateBuffer-commandBuffer-01814"); if (dstOffset >= dst_buffer_state->createInfo.size) { skip |= LogError( commandBuffer, "VUID-vkCmdUpdateBuffer-dstOffset-00032", "vkCmdUpdateBuffer() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str()); } else if (dataSize > dst_buffer_state->createInfo.size - dstOffset) { skip |= LogError(commandBuffer, "VUID-vkCmdUpdateBuffer-dataSize-00033", "vkCmdUpdateBuffer() dataSize (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s) minus dstOffset (0x%" PRIxLEAST64 ").", dataSize, dst_buffer_state->createInfo.size, report_data->FormatHandle(dst_buffer_state->buffer()).c_str(), dstOffset); } return skip; } bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETEVENT, "vkCmdSetEvent()"); Location loc(Func::vkCmdSetEvent, Field::stageMask); LogObjectList objects(commandBuffer); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR *pDependencyInfo) const { const char *func = "vkCmdSetEvent2KHR()"; LogObjectList objects(commandBuffer); objects.add(event); const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETEVENT, func); Location loc(Func::vkCmdSetEvent2KHR, Field::pDependencyInfo); if (pDependencyInfo->dependencyFlags != 0) { skip |= LogError(objects, "VUID-vkCmdSetEvent2KHR-dependencyFlags-03825", "%s (%s) must be 0", loc.dot(Field::dependencyFlags).Message().c_str(), string_VkDependencyFlags(pDependencyInfo->dependencyFlags).c_str()); } skip |= ValidateDependencyInfo(objects, loc, cb_state, pDependencyInfo); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdResetEvent, Field::stageMask); bool skip = false; skip |= ValidateCmd(cb_state, CMD_RESETEVENT, 
"vkCmdResetEvent()"); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } bool CoreChecks::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask) const { const char *func = "vkCmdResetEvent2KHR()"; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdResetEvent2KHR, Field::stageMask); bool skip = false; skip |= ValidateCmd(cb_state, CMD_RESETEVENT, func); skip |= ValidatePipelineStage(objects, loc, cb_state->GetQueueFlags(), stageMask); skip |= ValidateStageMaskHost(loc, stageMask); return skip; } static bool HasNonFramebufferStagePipelineStageFlags(VkPipelineStageFlags2KHR inflags) { return (inflags & ~(VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)) != 0; } // transient helper struct for checking parts of VUID 02285 struct RenderPassDepState { using Location = core_error::Location; using Func = core_error::Func; using Struct = core_error::Struct; using Field = core_error::Field; const CoreChecks *core; const std::string func_name; const std::string vuid; uint32_t active_subpass; const VkRenderPass rp_handle; const VkPipelineStageFlags2KHR disabled_features; const std::vector<uint32_t> &self_dependencies; const safe_VkSubpassDependency2 *dependencies; RenderPassDepState(const CoreChecks *c, const std::string &f, const std::string &v, uint32_t subpass, const VkRenderPass handle, const DeviceFeatures &features, const std::vector<uint32_t> &self_deps, const safe_VkSubpassDependency2 *deps) : core(c), func_name(f), vuid(v), active_subpass(subpass), rp_handle(handle), disabled_features(sync_utils::DisabledPipelineStages(features)), self_dependencies(self_deps), dependencies(deps) {} VkMemoryBarrier2KHR GetSubPassDepBarrier(const safe_VkSubpassDependency2 &dep) { VkMemoryBarrier2KHR result; const auto *barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dep.pNext); if (barrier) { result = *barrier; } else { result.srcStageMask = dep.srcStageMask; result.dstStageMask = dep.dstStageMask; result.srcAccessMask = dep.srcAccessMask; result.dstAccessMask = dep.dstAccessMask; } return result; } bool ValidateStage(const Location &loc, VkPipelineStageFlags2KHR src_stage_mask, VkPipelineStageFlags2KHR dst_stage_mask) { // Look for matching mask in any self-dependency bool match = false; for (const auto self_dep_index : self_dependencies) { const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]); auto sub_src_stage_mask = sync_utils::ExpandPipelineStages(sub_dep.srcStageMask, sync_utils::kAllQueueTypes, disabled_features); auto sub_dst_stage_mask = sync_utils::ExpandPipelineStages(sub_dep.dstStageMask, sync_utils::kAllQueueTypes, disabled_features); match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (src_stage_mask == (sub_src_stage_mask & src_stage_mask))) && ((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) || (dst_stage_mask == (sub_dst_stage_mask & dst_stage_mask))); if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency srcAccessMask " "for any self-dependency of subpass %d of %s for which dstAccessMask is also a subset. 
" "Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::srcStageMask).Message().c_str(), src_stage_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency dstAccessMask " "for any self-dependency of subpass %d of %s for which srcAccessMask is also a subset. " "Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::dstStageMask).Message().c_str(), dst_stage_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } bool ValidateAccess(const Location &loc, VkAccessFlags2KHR src_access_mask, VkAccessFlags2KHR dst_access_mask) { bool match = false; for (const auto self_dep_index : self_dependencies) { const auto sub_dep = GetSubPassDepBarrier(dependencies[self_dep_index]); match = (src_access_mask == (sub_dep.srcAccessMask & src_access_mask)) && (dst_access_mask == (sub_dep.dstAccessMask & dst_access_mask)); if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency " "srcAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::srcAccessMask).Message().c_str(), src_access_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); core->LogError(rp_handle, vuid, "%s (0x%" PRIx64 ") is not a subset of VkSubpassDependency " "dstAccessMask of subpass %d of %s. Candidate VkSubpassDependency are pDependencies entries [%s].", loc.dot(Field::dstAccessMask).Message().c_str(), dst_access_mask, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } bool ValidateDependencyFlag(VkDependencyFlags dependency_flags) { bool match = false; for (const auto self_dep_index : self_dependencies) { const auto &sub_dep = dependencies[self_dep_index]; match = sub_dep.dependencyFlags == dependency_flags; if (match) break; } if (!match) { std::stringstream self_dep_ss; stream_join(self_dep_ss, ", ", self_dependencies); core->LogError(rp_handle, vuid, "%s: dependencyFlags param (0x%X) does not equal VkSubpassDependency dependencyFlags value for any " "self-dependency of subpass %d of %s. 
Candidate VkSubpassDependency are pDependencies entries [%s].", func_name.c_str(), dependency_flags, active_subpass, core->report_data->FormatHandle(rp_handle).c_str(), self_dep_ss.str().c_str()); } return !match; } }; // Validate VUs for Pipeline Barriers that are within a renderPass // Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, VkDependencyFlags dependency_flags, uint32_t mem_barrier_count, const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count, const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) const { bool skip = false; const auto& rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass(), enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-pDependencies-02285", "%s Barriers cannot be set during subpass %d of %s with no self-dependency specified.", outer_loc.Message().c_str(), state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; skip |= state.ValidateStage(outer_loc, src_stage_mask, dst_stage_mask); if (0 != buffer_mem_barrier_count) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), buffer_mem_barrier_count, state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str()); } for (uint32_t i = 0; i < mem_barrier_count; ++i) { const auto &mem_barrier = mem_barriers[i]; Location loc(outer_loc.function, Struct::VkMemoryBarrier, Field::pMemoryBarriers, i); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } for (uint32_t i = 0; i < image_mem_barrier_count; ++i) { const auto &img_barrier = image_barriers[i]; Location loc(outer_loc.function, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dependency_flags); return skip; } bool CoreChecks::ValidateRenderPassPipelineBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, const 
VkDependencyInfoKHR *dep_info) const { bool skip = false; const auto& rp_state = cb_state->activeRenderPass; RenderPassDepState state(this, outer_loc.StringFunc().c_str(), "VUID-vkCmdPipelineBarrier2KHR-pDependencies-02285", cb_state->activeSubpass, rp_state->renderPass(), enabled_features, rp_state->self_dependencies[cb_state->activeSubpass], rp_state->createInfo.pDependencies); if (state.self_dependencies.size() == 0) { skip |= LogError(state.rp_handle, state.vuid, "%s: Barriers cannot be set during subpass %d of %s with no self-dependency specified.", state.func_name.c_str(), state.active_subpass, report_data->FormatHandle(rp_state->renderPass()).c_str()); return skip; } // Grab ref to current subpassDescription up-front for use below const auto &sub_desc = rp_state->createInfo.pSubpasses[state.active_subpass]; for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) { const auto &mem_barrier = dep_info->pMemoryBarriers[i]; Location loc(outer_loc.function, Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i); skip |= state.ValidateStage(loc, mem_barrier.srcStageMask, mem_barrier.dstStageMask); skip |= state.ValidateAccess(loc, mem_barrier.srcAccessMask, mem_barrier.dstAccessMask); } if (0 != dep_info->bufferMemoryBarrierCount) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-bufferMemoryBarrierCount-01178", "%s: bufferMemoryBarrierCount is non-zero (%d) for subpass %d of %s.", state.func_name.c_str(), dep_info->bufferMemoryBarrierCount, state.active_subpass, report_data->FormatHandle(state.rp_handle).c_str()); } for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) { const auto &img_barrier = dep_info->pImageMemoryBarriers[i]; Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i); skip |= state.ValidateStage(loc, img_barrier.srcStageMask, img_barrier.dstStageMask); skip |= state.ValidateAccess(loc, img_barrier.srcAccessMask, img_barrier.dstAccessMask); if (VK_QUEUE_FAMILY_IGNORED != img_barrier.srcQueueFamilyIndex || VK_QUEUE_FAMILY_IGNORED != img_barrier.dstQueueFamilyIndex) { skip |= LogError(state.rp_handle, "VUID-vkCmdPipelineBarrier2KHR-srcQueueFamilyIndex-01182", "%s is %d and dstQueueFamilyIndex is %d but both must be VK_QUEUE_FAMILY_IGNORED.", loc.dot(Field::srcQueueFamilyIndex).Message().c_str(), img_barrier.srcQueueFamilyIndex, img_barrier.dstQueueFamilyIndex); } // Secondary CBs can have null framebuffer so record will queue up validation in that case 'til FB is known if (VK_NULL_HANDLE != cb_state->activeFramebuffer) { skip |= ValidateImageBarrierAttachment(loc, cb_state, cb_state->activeFramebuffer.get(), state.active_subpass, sub_desc, state.rp_handle, img_barrier); } } skip |= state.ValidateDependencyFlag(dep_info->dependencyFlags); return skip; } bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; // these are always allowed. 
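// TOP_OF_PIPE and BOTTOM_OF_PIPE are pure execution-ordering helpers, HOST is not a
// device stage at all, and ALL_COMMANDS by definition covers whatever the queue
// supports, so none of them can conflict with a queue family's capabilities.
// Illustrative sketch of what the remaining checks reject (hypothetical values, not
// part of the validation logic):
//   VkPipelineStageFlags2KHR stage_mask = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
//   // On a compute-only queue (queue_flags == VK_QUEUE_COMPUTE_BIT) this bit survives
//   // the masking below and is reported as incompatible with the queue family.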
stage_mask &= ~(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR | VK_PIPELINE_STAGE_2_TOP_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_BOTTOM_OF_PIPE_BIT_KHR | VK_PIPELINE_STAGE_2_HOST_BIT_KHR); if (stage_mask == 0) { return skip; } static const std::map<VkPipelineStageFlags2KHR, VkQueueFlags> metaFlags{ {VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_ALL_TRANSFER_BIT_KHR, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT}, {VK_PIPELINE_STAGE_2_PRE_RASTERIZATION_SHADERS_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, {VK_PIPELINE_STAGE_2_VERTEX_INPUT_BIT_KHR, VK_QUEUE_GRAPHICS_BIT}, }; for (const auto &entry : metaFlags) { if (((entry.first & stage_mask) != 0) && ((entry.second & queue_flags) == 0)) { const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, entry.first); skip |= LogError(objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(entry.first).c_str(), string_VkQueueFlags(queue_flags).c_str()); } stage_mask &= ~entry.first; } if (stage_mask == 0) { return skip; } auto supported_flags = sync_utils::ExpandPipelineStages(VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR, queue_flags); auto bad_flags = stage_mask & ~supported_flags; // Lookup each bit in the stagemask and check for overlap between its table bits and queue_flags for (size_t i = 0; i < sizeof(bad_flags) * 8; i++) { VkPipelineStageFlags2KHR bit = (1ULL << i) & bad_flags; if (bit) { const auto& vuid = sync_vuid_maps::GetStageQueueCapVUID(loc, bit); skip |= LogError( objects, vuid, "%s flag %s is not compatible with the queue family properties (%s) of this command buffer.", loc.Message().c_str(), sync_utils::StringPipelineStageFlags(bit).c_str(), string_VkQueueFlags(queue_flags).c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStageFeatureEnables(const LogObjectList &objects, const Location &loc, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; if (!enabled_features.synchronization2_features.synchronization2 && stage_mask == 0) { const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, 0); std::stringstream msg; msg << loc.Message() << " must not be 0 unless synchronization2 is enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } auto disabled_stages = sync_utils::DisabledPipelineStages(enabled_features); auto bad_bits = stage_mask & disabled_stages; if (bad_bits == 0) { return skip; } for (size_t i = 0; i < sizeof(bad_bits) * 8; i++) { VkPipelineStageFlags2KHR bit = 1ULL << i; if (bit & bad_bits) { const auto& vuid = sync_vuid_maps::GetBadFeatureVUID(loc, bit); std::stringstream msg; msg << loc.Message() << " includes " << sync_utils::StringPipelineStageFlags(bit) << " when the device does not have " << sync_vuid_maps::kFeatureNameMap.at(bit) << " feature enabled."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } } return skip; } bool CoreChecks::ValidatePipelineStage(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; skip |= ValidateStageMasksAgainstQueueCapabilities(objects, loc, queue_flags, stage_mask); skip |= ValidatePipelineStageFeatureEnables(objects, loc, stage_mask); return skip; } bool CoreChecks::ValidateAccessMask(const LogObjectList &objects, const Location &loc, VkQueueFlags queue_flags, VkAccessFlags2KHR access_mask, VkPipelineStageFlags2KHR stage_mask) const { bool skip = false; // Early out if all commands set if 
((stage_mask & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) != 0) return skip; // or if only generic memory accesses are specified (or we got a 0 mask) access_mask &= ~(VK_ACCESS_2_MEMORY_READ_BIT_KHR | VK_ACCESS_2_MEMORY_WRITE_BIT_KHR); if (access_mask == 0) return skip; auto expanded_stages = sync_utils::ExpandPipelineStages(stage_mask, queue_flags); /* TODO */ auto valid_accesses = sync_utils::CompatibleAccessMask(expanded_stages); auto bad_accesses = (access_mask & ~valid_accesses); if (bad_accesses == 0) { return skip; } for (size_t i = 0; i < sizeof(bad_accesses) * 8; i++) { VkAccessFlags2KHR bit = (1ULL << i); if (bad_accesses & bit) { const auto& vuid = sync_vuid_maps::GetBadAccessFlagsVUID(loc, bit); std::stringstream msg; msg << loc.Message() << " bit " << sync_utils::StringAccessFlags(bit) << " is not supported by stage mask (" << sync_utils::StringPipelineStageFlags(stage_mask) << ")."; skip |= LogError(objects, vuid, "%s", msg.str().c_str()); } } return skip; } bool CoreChecks::ValidateEventStageMask(const ValidationStateTracker *state_data, const CMD_BUFFER_STATE *pCB, size_t eventCount, size_t firstEventIndex, VkPipelineStageFlags2KHR sourceStageMask, EventToStageMap *localEventToStageMap) { bool skip = false; VkPipelineStageFlags2KHR stage_mask = 0; const auto max_event = std::min((firstEventIndex + eventCount), pCB->events.size()); for (size_t event_index = firstEventIndex; event_index < max_event; ++event_index) { auto event = pCB->events[event_index]; auto event_data = localEventToStageMap->find(event); if (event_data != localEventToStageMap->end()) { stage_mask |= event_data->second; } else { auto global_event_data = state_data->GetEventState(event); if (!global_event_data) { skip |= state_data->LogError(event, kVUID_Core_DrawState_InvalidEvent, "%s cannot be waited on if it has never been set.", state_data->report_data->FormatHandle(event).c_str()); } else { stage_mask |= global_event_data->stageMask; } } } // TODO: Need to validate that host_bit is only set if set event is called // but set event can be called at any time. 
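// Illustrative sketch of the rule enforced below, from the application's point of
// view (hypothetical handles cb/eventA/eventB; not validation code): the srcStageMask
// passed to vkCmdWaitEvents must equal the OR of the stageMask values the awaited
// events were set with, plus VK_PIPELINE_STAGE_HOST_BIT when an event is signaled
// from the host with vkSetEvent:
//   vkCmdSetEvent(cb, eventA, VK_PIPELINE_STAGE_TRANSFER_BIT);
//   vkCmdSetEvent(cb, eventB, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
//   const VkEvent events[] = {eventA, eventB};
//   vkCmdWaitEvents(cb, 2, events,
//                   VK_PIPELINE_STAGE_TRANSFER_BIT | VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
//                   VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);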
if (sourceStageMask != stage_mask && sourceStageMask != (stage_mask | VK_PIPELINE_STAGE_HOST_BIT)) { skip |= state_data->LogError( pCB->commandBuffer(), "VUID-vkCmdWaitEvents-srcStageMask-parameter", "Submitting cmdbuffer with call to VkCmdWaitEvents using srcStageMask 0x%" PRIx64 " which must be the bitwise OR of " "the stageMask parameters used in calls to vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if used with " "vkSetEvent but instead is 0x%" PRIx64 ".", sourceStageMask, stage_mask); } return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); auto queue_flags = cb_state->GetQueueFlags(); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdWaitEvents); skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask); skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask); skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()"); skip |= ValidateBarriers(loc.dot(Field::pDependencyInfo), cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } bool CoreChecks::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; for (uint32_t i = 0; (i < eventCount) && !skip; i++) { LogObjectList objects(commandBuffer); objects.add(pEvents[i]); Location loc(Func::vkCmdWaitEvents2KHR, Field::pDependencyInfos, i); if (pDependencyInfos[i].dependencyFlags != 0) { skip |= LogError(objects, "VUID-vkCmdWaitEvents2KHR-dependencyFlags-03844", "%s (%s) must be 0.", loc.dot(Field::dependencyFlags).Message().c_str(), string_VkDependencyFlags(pDependencyInfos[i].dependencyFlags).c_str()); } skip |= ValidateDependencyInfo(objects, loc, cb_state, &pDependencyInfos[i]); } skip |= ValidateCmd(cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents2KHR()"); return skip; } void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // The StateTracker will add to the events vector. 
auto first_event_index = cb_state->events.size(); StateTracker::PreCallRecordCmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); auto event_added_count = cb_state->events.size() - first_event_index; const CMD_BUFFER_STATE *cb_state_const = cb_state; cb_state->eventUpdates.emplace_back( [cb_state_const, event_added_count, first_event_index, sourceStageMask]( const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) { if (!do_validate) return false; return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, sourceStageMask, localEventToStageMap); }); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PreCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // The StateTracker will add to the events vector. auto first_event_index = cb_state->events.size(); StateTracker::PreCallRecordCmdWaitEvents2KHR(commandBuffer, eventCount, pEvents, pDependencyInfos); auto event_added_count = cb_state->events.size() - first_event_index; const CMD_BUFFER_STATE *cb_state_const = cb_state; for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info); cb_state->eventUpdates.emplace_back( [cb_state_const, event_added_count, first_event_index, stage_masks]( const ValidationStateTracker *device_data, bool do_validate, EventToStageMap *localEventToStageMap) { if (!do_validate) return false; return ValidateEventStageMask(device_data, cb_state_const, event_added_count, first_event_index, stage_masks.src, localEventToStageMap); }); TransitionImageLayouts(cb_state, dep_info.imageMemoryBarrierCount, dep_info.pImageMemoryBarriers); } } void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriers(Func::vkCmdWaitEvents, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PostCallRecordCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents, const VkDependencyInfoKHR *pDependencyInfos) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); for (uint32_t i = 0; i < eventCount; i++) { const auto &dep_info = pDependencyInfos[i]; RecordBarriers(Func::vkCmdWaitEvents2KHR, cb_state, dep_info); } } bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) const { bool skip = false; const 
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); auto queue_flags = cb_state->GetQueueFlags(); Location loc(Func::vkCmdPipelineBarrier); skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, srcStageMask); skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, dstStageMask); skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()"); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); if (skip) return true; // Early return to avoid redundant errors from below calls } else { if (dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { skip |= LogError(objects, "VUID-vkCmdPipelineBarrier-dependencyFlags-01186", "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance", loc.dot(Field::dependencyFlags).Message().c_str()); } } skip |= ValidateBarriers(loc, cb_state, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); return skip; } bool CoreChecks::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); LogObjectList objects(commandBuffer); Location loc(Func::vkCmdPipelineBarrier2KHR, Field::pDependencyInfo); skip |= ValidateCmd(cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier2KHR()"); if (cb_state->activeRenderPass) { skip |= ValidateRenderPassPipelineBarriers(loc, cb_state, pDependencyInfo); if (skip) return true; // Early return to avoid redundant errors from below calls } else { if (pDependencyInfo->dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { skip |= LogError(objects, "VUID-vkCmdPipelineBarrier2KHR-dependencyFlags-01186", "%s VK_DEPENDENCY_VIEW_LOCAL_BIT must not be set outside of a render pass instance", loc.dot(Field::dependencyFlags).Message().c_str()); } } skip |= ValidateDependencyInfo(objects, loc, cb_state, pDependencyInfo); return skip; } void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriers(Func::vkCmdPipelineBarrier, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); TransitionImageLayouts(cb_state, imageMemoryBarrierCount, pImageMemoryBarriers); StateTracker::PreCallRecordCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } void CoreChecks::PreCallRecordCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR *pDependencyInfo) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); RecordBarriers(Func::vkCmdPipelineBarrier2KHR, cb_state, *pDependencyInfo); 
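// Record the image layout transitions requested by the barriers before forwarding to
// the state tracker, mirroring the non-KHR path above. Illustrative sketch of the
// synchronization2 call shape handled here (application-side usage with hypothetical
// handles cb/image; not validation code) -- all masks live inside the barrier structs
// instead of being top-level vkCmdPipelineBarrier parameters:
//   VkImageMemoryBarrier2KHR barrier{VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER_2_KHR};
//   barrier.srcStageMask = VK_PIPELINE_STAGE_2_TRANSFER_BIT_KHR;
//   barrier.srcAccessMask = VK_ACCESS_2_TRANSFER_WRITE_BIT_KHR;
//   barrier.dstStageMask = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
//   barrier.dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT_KHR;
//   barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
//   barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//   barrier.image = image;
//   barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
//   VkDependencyInfoKHR dep{VK_STRUCTURE_TYPE_DEPENDENCY_INFO_KHR};
//   dep.imageMemoryBarrierCount = 1;
//   dep.pImageMemoryBarriers = &barrier;
//   vkCmdPipelineBarrier2KHR(cb, &dep);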
TransitionImageLayouts(cb_state, pDependencyInfo->imageMemoryBarrierCount, pDependencyInfo->pImageMemoryBarriers); StateTracker::PreCallRecordCmdPipelineBarrier2KHR(commandBuffer, pDependencyInfo); } bool CoreChecks::ValidateBeginQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, VkFlags flags, uint32_t index, CMD_TYPE cmd, const char *cmd_name, const ValidateBeginQueryVuids *vuids) const { bool skip = false; const auto *query_pool_state = GetQueryPoolState(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_TIMESTAMP) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQuery-queryType-02804", "%s: The querypool's query type must not be VK_QUERY_TYPE_TIMESTAMP.", cmd_name); } // Check for nested queries if (cb_state->activeQueries.size()) { for (const auto &a_query : cb_state->activeQueries) { auto active_query_pool_state = GetQueryPoolState(a_query.pool); if (active_query_pool_state->createInfo.queryType == query_pool_ci.queryType && a_query.index == index) { LogObjectList obj_list(cb_state->commandBuffer()); obj_list.add(query_obj.pool); obj_list.add(a_query.pool); skip |= LogError(obj_list, vuids->vuid_dup_query_type, "%s: Within the same command buffer %s, query %d from pool %s has same queryType as active query " "%d from pool %s.", cmd_name, report_data->FormatHandle(cb_state->commandBuffer()).c_str(), query_obj.index, report_data->FormatHandle(query_obj.pool).c_str(), a_query.index, report_data->FormatHandle(a_query.pool).c_str()); } } } // There are tighter queue constraints to test for certain query pools if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_feedback); } if (query_pool_ci.queryType == VK_QUERY_TYPE_OCCLUSION) { skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT, vuids->vuid_queue_occlusion); } if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { if (!cb_state->performance_lock_acquired) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_profile_lock, "%s: profiling lock must be held before vkBeginCommandBuffer is called on " "a command buffer where performance queries are recorded.", cmd_name); } if (query_pool_state->has_perf_scope_command_buffer && cb_state->commandCount > 0) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_not_first, "%s: Query pool %s was created with a counter of scope " "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but %s is not the first recorded " "command in the command buffer.", cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name); } if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_scope_in_rp, "%s: Query pool %s was created with a counter of scope " "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.", cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name); } } skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags); if (flags & VK_QUERY_CONTROL_PRECISE_BIT) { if (!enabled_features.core.occlusionQueryPrecise) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_precise, "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but precise occlusion queries not enabled on the device.", cmd_name); } if (query_pool_ci.queryType != VK_QUERY_TYPE_OCCLUSION) { skip |= 
LogError(cb_state->commandBuffer(), vuids->vuid_precise, "%s: VK_QUERY_CONTROL_PRECISE_BIT provided, but pool query type is not VK_QUERY_TYPE_OCCLUSION", cmd_name); } } if (query_obj.query >= query_pool_ci.queryCount) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_query_count, "%s: Query index %" PRIu32 " must be less than query count %" PRIu32 " of %s.", cmd_name, query_obj.query, query_pool_ci.queryCount, report_data->FormatHandle(query_obj.pool).c_str()); } if (cb_state->unprotected == false) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb, "%s: command can't be used in protected command buffers.", cmd_name); } skip |= ValidateCmd(cb_state, cmd, cmd_name); return skip; } bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, slot); struct BeginQueryVuids : ValidateBeginQueryVuids { BeginQueryVuids() : ValidateBeginQueryVuids() { vuid_queue_flags = "VUID-vkCmdBeginQuery-commandBuffer-cmdpool"; vuid_queue_feedback = "VUID-vkCmdBeginQuery-queryType-02327"; vuid_queue_occlusion = "VUID-vkCmdBeginQuery-queryType-00803"; vuid_precise = "VUID-vkCmdBeginQuery-queryType-00800"; vuid_query_count = "VUID-vkCmdBeginQuery-query-00802"; vuid_profile_lock = "VUID-vkCmdBeginQuery-queryPool-03223"; vuid_scope_not_first = "VUID-vkCmdBeginQuery-queryPool-03224"; vuid_scope_in_rp = "VUID-vkCmdBeginQuery-queryPool-03225"; vuid_dup_query_type = "VUID-vkCmdBeginQuery-queryPool-01922"; vuid_protected_cb = "VUID-vkCmdBeginQuery-commandBuffer-01885"; } }; BeginQueryVuids vuids; return ValidateBeginQuery(cb_state, query_obj, flags, 0, CMD_BEGINQUERY, "vkCmdBeginQuery()", &vuids); } bool CoreChecks::VerifyQueryIsReset(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { bool skip = false; const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; QueryState state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass); // If reset was in another command buffer, check the global map if (state == QUERYSTATE_UNKNOWN) { state = state_data->GetQueryState(&state_data->queryToStateMap, query_obj.pool, query_obj.query, perfPass); } // Performance queries have limitation upon when they can be // reset. if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR && state == QUERYSTATE_UNKNOWN && perfPass >= query_pool_state->n_performance_passes) { // If the pass is invalid, assume RESET state, another error // will be raised in ValidatePerformanceQuery(). state = QUERYSTATE_RESET; } if (state != QUERYSTATE_RESET) { skip |= state_data->LogError(commandBuffer, kVUID_Core_DrawState_QueryNotReset, "%s: %s and query %" PRIu32 ": query not reset. " "After query pool creation, each query must be reset before it is used. 
" "Queries must also be reset between uses.", func_name, state_data->report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query); } return skip; } bool CoreChecks::ValidatePerformanceQuery(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, QueryObject query_obj, const char *func_name, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { const auto *query_pool_state = state_data->GetQueryPoolState(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType != VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) return false; const CMD_BUFFER_STATE *cb_state = state_data->GetCBState(commandBuffer); bool skip = false; if (perfPass >= query_pool_state->n_performance_passes) { skip |= state_data->LogError(commandBuffer, "VUID-VkPerformanceQuerySubmitInfoKHR-counterPassIndex-03221", "Invalid counterPassIndex (%u, maximum allowed %u) value for query pool %s.", perfPass, query_pool_state->n_performance_passes, state_data->report_data->FormatHandle(query_obj.pool).c_str()); } if (!cb_state->performance_lock_acquired || cb_state->performance_lock_released) { skip |= state_data->LogError(commandBuffer, "VUID-vkQueueSubmit-pCommandBuffers-03220", "Commandbuffer %s was submitted and contains a performance query but the" "profiling lock was not held continuously throughout the recording of commands.", state_data->report_data->FormatHandle(commandBuffer).c_str()); } QueryState command_buffer_state = state_data->GetQueryState(localQueryToStateMap, query_obj.pool, query_obj.query, perfPass); if (command_buffer_state == QUERYSTATE_RESET) { skip |= state_data->LogError( commandBuffer, query_obj.indexed ? "VUID-vkCmdBeginQueryIndexedEXT-None-02863" : "VUID-vkCmdBeginQuery-None-02863", "VkQuery begin command recorded in a command buffer that, either directly or " "through secondary command buffers, also contains a vkCmdResetQueryPool command " "affecting the same query."); } if (firstPerfQueryPool != VK_NULL_HANDLE) { if (firstPerfQueryPool != query_obj.pool && !state_data->enabled_features.performance_query_features.performanceCounterMultipleQueryPools) { skip |= state_data->LogError( commandBuffer, query_obj.indexed ? 
"VUID-vkCmdBeginQueryIndexedEXT-queryPool-03226" : "VUID-vkCmdBeginQuery-queryPool-03226", "Commandbuffer %s contains more than one performance query pool but " "performanceCounterMultipleQueryPools is not enabled.", state_data->report_data->FormatHandle(commandBuffer).c_str()); } } else { firstPerfQueryPool = query_obj.pool; } return skip; } void CoreChecks::EnqueueVerifyBeginQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj, const char *func_name) { CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer); // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord cb_state->queryUpdates.emplace_back([command_buffer, query_obj, func_name](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; bool skip = false; skip |= ValidatePerformanceQuery(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); skip |= VerifyQueryIsReset(device_data, command_buffer, query_obj, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); return skip; }); } void CoreChecks::PreCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) { if (disabled[query_validation]) return; QueryObject query_obj = {queryPool, slot}; EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQuery()"); } void CoreChecks::EnqueueVerifyEndQuery(VkCommandBuffer command_buffer, const QueryObject &query_obj) { CMD_BUFFER_STATE *cb_state = GetCBState(command_buffer); // Enqueue the submit time validation here, ahead of the submit time state update in the StateTracker's PostCallRecord cb_state->queryUpdates.emplace_back([command_buffer, query_obj](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; bool skip = false; const CMD_BUFFER_STATE *cb_state = device_data->GetCBState(command_buffer); const auto *query_pool_state = device_data->GetQueryPoolState(query_obj.pool); if (query_pool_state->has_perf_scope_command_buffer && (cb_state->commandCount - 1) != query_obj.endCommandIndex) { skip |= device_data->LogError(command_buffer, "VUID-vkCmdEndQuery-queryPool-03227", "vkCmdEndQuery: Query pool %s was created with a counter of scope" "VK_QUERY_SCOPE_COMMAND_BUFFER_KHR but the end of the query is not the last " "command in the command buffer %s.", device_data->report_data->FormatHandle(query_obj.pool).c_str(), device_data->report_data->FormatHandle(command_buffer).c_str()); } return skip; }); } bool CoreChecks::ValidateCmdEndQuery(const CMD_BUFFER_STATE *cb_state, const QueryObject &query_obj, uint32_t index, CMD_TYPE cmd, const char *cmd_name, const ValidateEndQueryVuids *vuids) const { bool skip = false; if (!cb_state->activeQueries.count(query_obj)) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_active_queries, "%s: Ending a query before it was started: %s, index %d.", cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), query_obj.query); } const auto *query_pool_state = GetQueryPoolState(query_obj.pool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { if (query_pool_state->has_perf_scope_render_pass && cb_state->activeRenderPass) { skip |= LogError(cb_state->commandBuffer(), 
"VUID-vkCmdEndQuery-queryPool-03228", "%s: Query pool %s was created with a counter of scope " "VK_QUERY_SCOPE_RENDER_PASS_KHR but %s is inside a render pass.", cmd_name, report_data->FormatHandle(query_obj.pool).c_str(), cmd_name); } } skip |= ValidateCmdQueueFlags(cb_state, cmd_name, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT, vuids->vuid_queue_flags); skip |= ValidateCmd(cb_state, cmd, cmd_name); if (cb_state->unprotected == false) { skip |= LogError(cb_state->commandBuffer(), vuids->vuid_protected_cb, "%s: command can't be used in protected command buffers.", cmd_name); } return skip; } bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) const { if (disabled[query_validation]) return false; bool skip = false; QueryObject query_obj = {queryPool, slot}; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { const uint32_t available_query_count = query_pool_state->createInfo.queryCount; // Only continue validating if the slot is even within range if (slot >= available_query_count) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdEndQuery-query-00810", "vkCmdEndQuery(): query index (%u) is greater or equal to the queryPool size (%u).", slot, available_query_count); } else { struct EndQueryVuids : ValidateEndQueryVuids { EndQueryVuids() : ValidateEndQueryVuids() { vuid_queue_flags = "VUID-vkCmdEndQuery-commandBuffer-cmdpool"; vuid_active_queries = "VUID-vkCmdEndQuery-None-01923"; vuid_protected_cb = "VUID-vkCmdEndQuery-commandBuffer-01886"; } }; EndQueryVuids vuids; skip |= ValidateCmdEndQuery(cb_state, query_obj, 0, CMD_ENDQUERY, "vkCmdEndQuery()", &vuids); } } return skip; } void CoreChecks::PreCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query_obj = {queryPool, slot}; query_obj.endCommandIndex = cb_state->commandCount - 1; EnqueueVerifyEndQuery(commandBuffer, query_obj); } bool CoreChecks::ValidateQueryPoolIndex(VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *func_name, const char *first_vuid, const char *sum_vuid) const { bool skip = false; const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { const uint32_t available_query_count = query_pool_state->createInfo.queryCount; if (firstQuery >= available_query_count) { skip |= LogError(queryPool, first_vuid, "%s: In Query %s the firstQuery (%u) is greater or equal to the queryPool size (%u).", func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, available_query_count); } if ((firstQuery + queryCount) > available_query_count) { skip |= LogError(queryPool, sum_vuid, "%s: In Query %s the sum of firstQuery (%u) + queryCount (%u) is greater than the queryPool size (%u).", func_name, report_data->FormatHandle(queryPool).c_str(), firstQuery, queryCount, available_query_count); } } return skip; } bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_RESETQUERYPOOL, "VkCmdResetQueryPool()"); skip |= ValidateQueryPoolIndex(queryPool, 
firstQuery, queryCount, "vkCmdResetQueryPool()", "VUID-vkCmdResetQueryPool-firstQuery-00796", "VUID-vkCmdResetQueryPool-firstQuery-00797"); return skip; } static QueryResultType GetQueryResultType(QueryState state, VkQueryResultFlags flags) { switch (state) { case QUERYSTATE_UNKNOWN: return QUERYRESULT_UNKNOWN; case QUERYSTATE_RESET: case QUERYSTATE_RUNNING: if (flags & VK_QUERY_RESULT_WAIT_BIT) { return ((state == QUERYSTATE_RESET) ? QUERYRESULT_WAIT_ON_RESET : QUERYRESULT_WAIT_ON_RUNNING); } else if ((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_NO_DATA; } case QUERYSTATE_ENDED: if ((flags & VK_QUERY_RESULT_WAIT_BIT) || (flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT)) { return QUERYRESULT_SOME_DATA; } else { return QUERYRESULT_UNKNOWN; } case QUERYSTATE_AVAILABLE: return QUERYRESULT_SOME_DATA; } assert(false); return QUERYRESULT_UNKNOWN; } bool CoreChecks::ValidateCopyQueryPoolResults(const ValidationStateTracker *state_data, VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, uint32_t perfPass, VkQueryResultFlags flags, QueryMap *localQueryToStateMap) { bool skip = false; for (uint32_t i = 0; i < queryCount; i++) { QueryState state = state_data->GetQueryState(localQueryToStateMap, queryPool, firstQuery + i, perfPass); QueryResultType result_type = GetQueryResultType(state, flags); if (result_type != QUERYRESULT_SOME_DATA && result_type != QUERYRESULT_UNKNOWN) { skip |= state_data->LogError( commandBuffer, kVUID_Core_DrawState_InvalidQuery, "vkCmdCopyQueryPoolResults(): Requesting a copy from query to buffer on %s query %" PRIu32 ": %s", state_data->report_data->FormatHandle(queryPool).c_str(), firstQuery + i, string_QueryResultType(result_type)); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) const { if (disabled[query_validation]) return false; const auto cb_state = GetCBState(commandBuffer); const auto dst_buff_state = GetBufferState(dstBuffer); assert(cb_state); assert(dst_buff_state); bool skip = ValidateMemoryIsBoundToBuffer(dst_buff_state, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00826"); skip |= ValidateQueryPoolStride("VUID-vkCmdCopyQueryPoolResults-flags-00822", "VUID-vkCmdCopyQueryPoolResults-flags-00823", stride, "dstOffset", dstOffset, flags); // Validate that DST buffer has correct usage flags set skip |= ValidateBufferUsageFlags(dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00825", "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT"); skip |= ValidateCmd(cb_state, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()"); skip |= ValidateQueryPoolIndex(queryPool, firstQuery, queryCount, "vkCmdCopyQueryPoolResults()", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00820", "VUID-vkCmdCopyQueryPoolResults-firstQuery-00821"); if (dstOffset >= dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstOffset-00819", "vkCmdCopyQueryPoolResults() dstOffset (0x%" PRIxLEAST64 ") is not less than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset, dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer()).c_str()); } 
else if (dstOffset + (queryCount * stride) > dst_buff_state->requirements.size) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-dstBuffer-00824", "vkCmdCopyQueryPoolResults() storage required (0x%" PRIxLEAST64 ") equal to dstOffset + (queryCount * stride) is greater than the size (0x%" PRIxLEAST64 ") of buffer (%s).", dstOffset + (queryCount * stride), dst_buff_state->requirements.size, report_data->FormatHandle(dst_buff_state->buffer()).c_str()); } auto query_pool_state_iter = queryPoolMap.find(queryPool); if (query_pool_state_iter != queryPoolMap.end()) { auto query_pool_state = query_pool_state_iter->second.get(); if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR) { skip |= ValidatePerformanceQueryResults("vkCmdCopyQueryPoolResults", query_pool_state, firstQuery, queryCount, flags); if (!phys_dev_ext_props.performance_query_props.allowCommandBufferQueryCopies) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-03232", "vkCmdCopyQueryPoolResults called with query pool %s but " "VkPhysicalDevicePerformanceQueryPropertiesKHR::allowCommandBufferQueryCopies " "is not set.", report_data->FormatHandle(queryPool).c_str()); } } if ((query_pool_state->createInfo.queryType == VK_QUERY_TYPE_TIMESTAMP) && ((flags & VK_QUERY_RESULT_PARTIAL_BIT) != 0)) { skip |= LogError(commandBuffer, "VUID-vkCmdCopyQueryPoolResults-queryType-00827", "vkCmdCopyQueryPoolResults() query pool %s was created with VK_QUERY_TYPE_TIMESTAMP so flags must not " "contain VK_QUERY_RESULT_PARTIAL_BIT.", report_data->FormatHandle(queryPool).c_str()); } if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL) { skip |= LogError(queryPool, "VUID-vkCmdCopyQueryPoolResults-queryType-02734", "vkCmdCopyQueryPoolResults() called but QueryPool %s was created with queryType " "VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL.", report_data->FormatHandle(queryPool).c_str()); } } return skip; } void CoreChecks::PreCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) { if (disabled[query_validation]) return; auto cb_state = GetCBState(commandBuffer); cb_state->queryUpdates.emplace_back([commandBuffer, queryPool, firstQuery, queryCount, flags]( const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return ValidateCopyQueryPoolResults(device_data, commandBuffer, queryPool, firstQuery, queryCount, perfPass, flags, localQueryToStateMap); }); } bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void *pValues) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state, CMD_PUSHCONSTANTS, "vkCmdPushConstants()"); // Check if pipeline_layout VkPushConstantRange(s) overlapping offset, size have stageFlags set for each stage in the command // stageFlags argument, *and* that the command stageFlags argument has bits set for the stageFlags in each overlapping range. 
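// Illustrative sketch of a pipeline layout that satisfies both checks below
// (hypothetical values; cb/layout/data are assumed application handles):
//   VkPushConstantRange ranges[2] = {
//       {VK_SHADER_STAGE_VERTEX_BIT, 0, 16},    // bytes [0, 16)  -> vertex stage
//       {VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16}, // bytes [16, 32) -> fragment stage
//   };
//   // vkCmdPushConstants(cb, layout, VK_SHADER_STAGE_VERTEX_BIT, 0, 16, data) is valid;
//   // using VK_SHADER_STAGE_FRAGMENT_BIT with offset 0 would trigger 01795 because no
//   // range overlapping bytes [0, 16) declares the fragment stage.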
if (!skip) { const auto &ranges = *GetPipelineLayout(layout)->push_constant_ranges; VkShaderStageFlags found_stages = 0; for (const auto &range : ranges) { if ((offset >= range.offset) && (offset + size <= range.offset + range.size)) { VkShaderStageFlags matching_stages = range.stageFlags & stageFlags; if (matching_stages != range.stageFlags) { skip |= LogError(commandBuffer, "VUID-vkCmdPushConstants-offset-01796", "vkCmdPushConstants(): stageFlags (%s), offset (%" PRIu32 "), and size (%" PRIu32 ") must contain all stages in overlapping VkPushConstantRange stageFlags (%s), offset (%" PRIu32 "), and size (%" PRIu32 ") in %s.", string_VkShaderStageFlags(stageFlags).c_str(), offset, size, string_VkShaderStageFlags(range.stageFlags).c_str(), range.offset, range.size, report_data->FormatHandle(layout).c_str()); } // Accumulate all stages we've found found_stages = matching_stages | found_stages; } } if (found_stages != stageFlags) { uint32_t missing_stages = ~found_stages & stageFlags; skip |= LogError( commandBuffer, "VUID-vkCmdPushConstants-offset-01795", "vkCmdPushConstants(): %s, VkPushConstantRange in %s overlapping offset = %d and size = %d do not contain %s.", string_VkShaderStageFlags(stageFlags).c_str(), report_data->FormatHandle(layout).c_str(), offset, size, string_VkShaderStageFlags(missing_stages).c_str()); } } return skip; } bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()"); const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-queryPool-01416", "vkCmdWriteTimestamp(): Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", report_data->FormatHandle(queryPool).c_str()); } const uint32_t timestamp_valid_bits = GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits; if (timestamp_valid_bits == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-timestampValidBits-00829", "vkCmdWriteTimestamp(): Query Pool %s has a timestampValidBits value of zero.", report_data->FormatHandle(queryPool).c_str()); } if ((query_pool_state != nullptr) && (slot >= query_pool_state->createInfo.queryCount)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp-query-04904", "vkCmdWriteTimestamp(): query (%" PRIu32 ") is not lower than the number of queries (%" PRIu32 ") in Query pool %s.", slot, query_pool_state->createInfo.queryCount, report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR stage, VkQueryPool queryPool, uint32_t slot) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp2KHR()"); Location loc(Func::vkCmdWriteTimestamp2KHR, Field::stage); if ((stage & (stage - 1)) != 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-stage-03859", "%s (%s) must only set a 
single pipeline stage.", loc.Message().c_str(), string_VkPipelineStageFlags2KHR(stage).c_str()); } skip |= ValidatePipelineStage(LogObjectList(cb_state->commandBuffer()), loc, cb_state->GetQueueFlags(), stage); loc.field = Field::queryPool; const QUERY_POOL_STATE *query_pool_state = GetQueryPoolState(queryPool); if ((query_pool_state != nullptr) && (query_pool_state->createInfo.queryType != VK_QUERY_TYPE_TIMESTAMP)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-queryPool-03861", "%s Query Pool %s was not created with VK_QUERY_TYPE_TIMESTAMP.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } const uint32_t timestampValidBits = GetPhysicalDeviceState()->queue_family_properties[cb_state->command_pool->queueFamilyIndex].timestampValidBits; if (timestampValidBits == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdWriteTimestamp2KHR-timestampValidBits-03863", "%s Query Pool %s has a timestampValidBits value of zero.", loc.Message().c_str(), report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PreCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query = {queryPool, slot}; const char *func_name = "vkCmdWriteTimestamp()"; cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); }); } void CoreChecks::PreCallRecordCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage, VkQueryPool queryPool, uint32_t slot) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query = {queryPool, slot}; const char *func_name = "vkCmdWriteTimestamp2KHR()"; cb_state->queryUpdates.emplace_back([commandBuffer, query, func_name](const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; return VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); }); } void CoreChecks::PreCallRecordCmdWriteAccelerationStructuresPropertiesKHR(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) { if (disabled[query_validation]) return; // Enqueue the submit time validation check here, before the submit time state update in StateTracker::PostCall... 
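// A query's reset state is only fully known at queue submit time (the reset may have
// been recorded in a different command buffer), so instead of validating immediately
// a lambda is queued on cb_state->queryUpdates and later evaluated against the
// per-submit localQueryToStateMap, one QueryObject per written acceleration structure.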
CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); const char *func_name = "vkCmdWriteAccelerationStructuresPropertiesKHR()"; cb_state->queryUpdates.emplace_back([accelerationStructureCount, commandBuffer, firstQuery, func_name, queryPool]( const ValidationStateTracker *device_data, bool do_validate, VkQueryPool &firstPerfQueryPool, uint32_t perfPass, QueryMap *localQueryToStateMap) { if (!do_validate) return false; bool skip = false; for (uint32_t i = 0; i < accelerationStructureCount; i++) { QueryObject query = {{queryPool, firstQuery + i}, perfPass}; skip |= VerifyQueryIsReset(device_data, commandBuffer, query, func_name, firstPerfQueryPool, perfPass, localQueryToStateMap); } return skip; }); } bool CoreChecks::MatchUsage(uint32_t count, const VkAttachmentReference2 *attachments, const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, const char *error_code) const { bool skip = false; if (attachments) { for (uint32_t attach = 0; attach < count; attach++) { if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) { // Attachment counts are verified elsewhere, but prevent an invalid access if (attachments[attach].attachment < fbci->attachmentCount) { if ((fbci->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment]; auto view_state = GetImageViewState(*image_view); if (view_state) { const VkImageCreateInfo *ici = &GetImageState(view_state->create_info.image)->createInfo; if (ici != nullptr) { auto creation_usage = ici->usage; const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(ici->pNext); if (stencil_usage_info) { creation_usage |= stencil_usage_info->stencilUsage; } if ((creation_usage & usage_flag) == 0) { skip |= LogError(device, error_code, "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's " "IMAGE_USAGE flags (%s).", attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag)); } } } } else { const VkFramebufferAttachmentsCreateInfo *fbaci = LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(fbci->pNext); if (fbaci != nullptr && fbaci->pAttachmentImageInfos != nullptr && fbaci->attachmentImageInfoCount > attachments[attach].attachment) { uint32_t image_usage = fbaci->pAttachmentImageInfos[attachments[attach].attachment].usage; if ((image_usage & usage_flag) == 0) { skip |= LogError(device, error_code, "vkCreateFramebuffer: Framebuffer attachment info (%d) conflicts with the image's " "IMAGE_USAGE flags (%s).", attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag)); } } } } } } } return skip; } bool CoreChecks::ValidateFramebufferCreateInfo(const VkFramebufferCreateInfo *pCreateInfo) const { bool skip = false; const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info = LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(pCreateInfo->pNext); if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) { if (!enabled_features.core12.imagelessFramebuffer) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03189", "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, " "but the imagelessFramebuffer feature is not enabled."); } if (framebuffer_attachments_create_info == nullptr) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03190", "vkCreateFramebuffer(): VkFramebufferCreateInfo flags includes VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, " "but no instance of VkFramebufferAttachmentsCreateInfo is present in the 
pNext chain."); } else { if (framebuffer_attachments_create_info->attachmentImageInfoCount != 0 && framebuffer_attachments_create_info->attachmentImageInfoCount != pCreateInfo->attachmentCount) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-03191", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount is %u, but " "VkFramebufferAttachmentsCreateInfo attachmentImageInfoCount is %u.", pCreateInfo->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount); } } } auto rp_state = GetRenderPassState(pCreateInfo->renderPass); if (rp_state) { const VkRenderPassCreateInfo2 *rpci = rp_state->createInfo.ptr(); if (rpci->attachmentCount != pCreateInfo->attachmentCount) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-attachmentCount-00876", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount " "of %u of %s being used to create Framebuffer.", pCreateInfo->attachmentCount, rpci->attachmentCount, report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } else { // attachmentCounts match, so make sure corresponding attachment details line up if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { const VkImageView *image_views = pCreateInfo->pAttachments; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { auto view_state = GetImageViewState(image_views[i]); if (view_state == nullptr) { skip |= LogError( image_views[i], "VUID-VkFramebufferCreateInfo-flags-02778", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is not a valid VkImageView.", i); } else { auto &ivci = view_state->create_info; auto &subresource_range = view_state->normalized_subresource_range; if (ivci.format != rpci->pAttachments[i].format) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00880", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not " "match the format of %s used by the corresponding attachment for %s.", i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } const VkImageCreateInfo *ici = &GetImageState(ivci.image)->createInfo; if (ici->samples != rpci->pAttachments[i].samples) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-pAttachments-00881", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not " "match the %s " "samples used by the corresponding attachment for %s.", i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } // Verify that image memory is valid auto image_data = GetImageState(ivci.image); skip |= ValidateMemoryIsBoundToImage(image_data, "vkCreateFramebuffer()", "UNASSIGNED-CoreValidation-BoundResourceFreedMemoryAccess"); // Verify that view only has a single mip level if (subresource_range.levelCount != 1) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-00883", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u but " "only a single mip level (levelCount == 1) is allowed when creating a Framebuffer.", i, subresource_range.levelCount); } const uint32_t mip_level = subresource_range.baseMipLevel; uint32_t mip_width = max(1u, ici->extent.width >> mip_level); uint32_t mip_height = max(1u, ici->extent.height >> mip_level); bool 
used_as_input_color_resolve_depth_stencil_attachment = false; bool used_as_fragment_shading_rate_attachment = false; bool fsr_non_zero_viewmasks = false; for (uint32_t j = 0; j < rpci->subpassCount; ++j) { const VkSubpassDescription2 &subpass = rpci->pSubpasses[j]; uint32_t highest_view_bit = 0; for (uint32_t k = 0; k < 32; ++k) { if (((subpass.viewMask >> k) & 1) != 0) { highest_view_bit = k; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) { if (subpass.pInputAttachments[k].attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) { if (subpass.pColorAttachments[k].attachment == i || (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; } if (used_as_input_color_resolve_depth_stencil_attachment) { if (subresource_range.layerCount <= highest_view_bit) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-renderPass-04536", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, subresource_range.layerCount, highest_view_bit, j); } } if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment; fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext); if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) { used_as_fragment_shading_rate_attachment = true; if ((mip_width * fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04539", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level " "%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "width (%u) and the " "specified shading rate texel width (%u) are smaller than the " "corresponding framebuffer width (%u).", i, subresource_range.baseMipLevel, j, mip_width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width); } if ((mip_height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04540", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u " "is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "height (%u) and the " "specified shading rate texel height (%u) are smaller than the corresponding " "framebuffer height (%u).", i, subresource_range.baseMipLevel, j, mip_height, fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height); } if (highest_view_bit != 0) { fsr_non_zero_viewmasks = true; } if (subresource_range.layerCount <= highest_view_bit) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04537", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, subresource_range.layerCount, highest_view_bit, j); } } } } if (enabled_features.fragment_density_map_features.fragmentDensityMap) { const 
VkRenderPassFragmentDensityMapCreateInfoEXT *fdm_attachment; fdm_attachment = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(rpci->pNext); if (fdm_attachment && fdm_attachment->fragmentDensityMapAttachment.attachment == i) { uint32_t ceiling_width = static_cast<uint32_t>(ceil( static_cast<float>(pCreateInfo->width) / std::max(static_cast<float>( phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.width), 1.0f))); if (mip_width < ceiling_width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-02555", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has width (%u) " "smaller than the ceiling (%u) of framebuffer width / maxFragmentDensityTexelSize.width.", i, subresource_range.baseMipLevel, mip_width, ceiling_width); } uint32_t ceiling_height = static_cast<uint32_t>(ceil( static_cast<float>(pCreateInfo->height) / std::max(static_cast<float>( phys_dev_ext_props.fragment_density_map_props.maxFragmentDensityTexelSize.height), 1.0f))); if (mip_height < ceiling_height) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-02556", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has height (%u) " "smaller than the ceiling (%u) of framebuffer height / maxFragmentDensityTexelSize.height.", i, subresource_range.baseMipLevel, mip_height, ceiling_height); } } } if (used_as_input_color_resolve_depth_stencil_attachment) { if (mip_width < pCreateInfo->width) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04533", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has " "width (%u) smaller than the corresponding framebuffer width (%u).", i, mip_level, mip_width, pCreateInfo->width); } if (mip_height < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04534", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has " "height (%u) smaller than the corresponding framebuffer height (%u).", i, mip_level, mip_height, pCreateInfo->height); } if (subresource_range.layerCount < pCreateInfo->layers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04535", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, subresource_range.layerCount, pCreateInfo->layers); } } if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) { if (subresource_range.layerCount != 1 && subresource_range.layerCount < pCreateInfo->layers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04538", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, subresource_range.layerCount, pCreateInfo->layers); } } if (IsIdentitySwizzle(ivci.components) == false) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-pAttachments-00884", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has non-identity swizzle. All " "framebuffer attachments must have been created with the identity swizzle.
Here are the actual " "swizzle values:\n" "r swizzle = %s\n" "g swizzle = %s\n" "b swizzle = %s\n" "a swizzle = %s\n", i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g), string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a)); } if ((ivci.viewType == VK_IMAGE_VIEW_TYPE_2D) || (ivci.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { const auto image_state = GetImageState(ivci.image); if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) { if (FormatIsDepthOrStencil(ivci.format)) { LogObjectList objlist(device); objlist.add(ivci.image); skip |= LogError( objlist, "VUID-VkFramebufferCreateInfo-pAttachments-00891", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type of " "%s " "which was taken from image %s of type VK_IMAGE_TYPE_3D, but the image view format is a " "depth/stencil format %s", i, string_VkImageViewType(ivci.viewType), report_data->FormatHandle(ivci.image).c_str(), string_VkFormat(ivci.format)); } } } if (ivci.viewType == VK_IMAGE_VIEW_TYPE_3D) { LogObjectList objlist(device); objlist.add(image_views[i]); skip |= LogError(objlist, "VUID-VkFramebufferCreateInfo-flags-04113", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has an image view type " "of VK_IMAGE_VIEW_TYPE_3D", i); } } } } else if (framebuffer_attachments_create_info) { // VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT is set
for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { auto &aii = framebuffer_attachments_create_info->pAttachmentImageInfos[i]; bool format_found = false; for (uint32_t j = 0; j < aii.viewFormatCount; ++j) { if (aii.pViewFormats[j] == rpci->pAttachments[i].format) { format_found = true; } } if (!format_found) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-flags-03205", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u does not include " "format %s used " "by the corresponding attachment for renderPass (%s).", i, string_VkFormat(rpci->pAttachments[i].format), report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } bool used_as_input_color_resolve_depth_stencil_attachment = false; bool used_as_fragment_shading_rate_attachment = false; bool fsr_non_zero_viewmasks = false; for (uint32_t j = 0; j < rpci->subpassCount; ++j) { const VkSubpassDescription2 &subpass = rpci->pSubpasses[j]; uint32_t highest_view_bit = 0; for (uint32_t k = 0; k < 32; ++k) { if (((subpass.viewMask >> k) & 1) != 0) { highest_view_bit = k; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].inputAttachmentCount; ++k) { if (subpass.pInputAttachments[k].attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } for (uint32_t k = 0; k < rpci->pSubpasses[j].colorAttachmentCount; ++k) { if (subpass.pColorAttachments[k].attachment == i || (subpass.pResolveAttachments && subpass.pResolveAttachments[k].attachment == i)) { used_as_input_color_resolve_depth_stencil_attachment = true; break; } } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment == i) { used_as_input_color_resolve_depth_stencil_attachment = true; } if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fsr_attachment; fsr_attachment = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass.pNext); if (fsr_attachment && fsr_attachment->pFragmentShadingRateAttachment && fsr_attachment->pFragmentShadingRateAttachment->attachment == i) { used_as_fragment_shading_rate_attachment = true; if ((aii.width * 
fsr_attachment->shadingRateAttachmentTexelSize.width) < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04543", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its width (%u) and the " "specified shading rate texel width (%u) is smaller than the corresponding framebuffer " "width (%u).", i, j, aii.width, fsr_attachment->shadingRateAttachmentTexelSize.width, pCreateInfo->width); } if ((aii.height * fsr_attachment->shadingRateAttachmentTexelSize.height) < pCreateInfo->height) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04544", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u is used as a " "fragment shading rate attachment in subpass %u, but the product of its " "height (%u) and the " "specified shading rate texel height (%u) is smaller than the corresponding " "framebuffer height (%u).", i, j, aii.height, fsr_attachment->shadingRateAttachmentTexelSize.height, pCreateInfo->height); } if (highest_view_bit != 0) { fsr_non_zero_viewmasks = true; } if (aii.layerCount != 1 && aii.layerCount <= highest_view_bit) { skip |= LogError( device, kVUIDUndefined, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "less than or equal to the highest bit in the view mask (%u) of subpass %u.", i, aii.layerCount, highest_view_bit, j); } } } } if (used_as_input_color_resolve_depth_stencil_attachment) { if (aii.width < pCreateInfo->width) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04541", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a width of only %u, " "but the framebuffer has a width of %u.", i, aii.width, pCreateInfo->width); } if (aii.height < pCreateInfo->height) { skip |= LogError( device, "VUID-VkFramebufferCreateInfo-flags-04542", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has a height of only %u, " "but the framebuffer has a height of %u.", i, aii.height, pCreateInfo->height); } const char *mismatched_layers_no_multiview_vuid = device_extensions.vk_khr_multiview ? 
"VUID-VkFramebufferCreateInfo-renderPass-04546" : "VUID-VkFramebufferCreateInfo-flags-04547"; if ((rpci->subpassCount == 0) || (rpci->pSubpasses[0].viewMask == 0)) { if (aii.layerCount < pCreateInfo->layers) { skip |= LogError( device, mismatched_layers_no_multiview_vuid, "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info #%u has only #%u layers, " "but framebuffer has #%u layers.", i, aii.layerCount, pCreateInfo->layers); } } } if (used_as_fragment_shading_rate_attachment && !fsr_non_zero_viewmasks) { if (aii.layerCount != 1 && aii.layerCount < pCreateInfo->layers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-flags-04545", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a layer count (%u) " "smaller than the corresponding framebuffer layer count (%u).", i, aii.layerCount, pCreateInfo->layers); } } } // Validate image usage uint32_t attachment_index = VK_ATTACHMENT_UNUSED; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(rpci->pSubpasses[i].colorAttachmentCount, rpci->pSubpasses[i].pResolveAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03201"); skip |= MatchUsage(1, rpci->pSubpasses[i].pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03202"); skip |= MatchUsage(rpci->pSubpasses[i].inputAttachmentCount, rpci->pSubpasses[i].pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03204"); const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr) { skip |= MatchUsage(1, depth_stencil_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-flags-03203"); } const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(rpci->pSubpasses[i].pNext); if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && fragment_shading_rate_attachment_info != nullptr) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04549"); } } if (device_extensions.vk_khr_multiview) { if ((rpci->subpassCount > 0) && (rpci->pSubpasses[0].viewMask != 0)) { for (uint32_t i = 0; i < rpci->subpassCount; ++i) { const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(rpci->pSubpasses[i].pNext); uint32_t view_bits = rpci->pSubpasses[i].viewMask; uint32_t highest_view_bit = 0; for (int j = 0; j < 32; ++j) { if (((view_bits >> j) & 1) != 0) { highest_view_bit = j; } } for (uint32_t j = 0; j < rpci->pSubpasses[i].colorAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pColorAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( 
pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a color attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } if (rpci->pSubpasses[i].pResolveAttachments) { attachment_index = rpci->pSubpasses[i].pResolveAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a resolve attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } } for (uint32_t j = 0; j < rpci->pSubpasses[i].inputAttachmentCount; ++j) { attachment_index = rpci->pSubpasses[i].pInputAttachments[j].attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as an input attachment %u.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit, j); } } } if (rpci->pSubpasses[i].pDepthStencilAttachment != nullptr) { attachment_index = rpci->pSubpasses[i].pDepthStencilAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil attachment.", attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } if (device_extensions.vk_khr_depth_stencil_resolve && depth_stencil_resolve != nullptr && depth_stencil_resolve->pDepthStencilResolveAttachment != nullptr) { attachment_index = depth_stencil_resolve->pDepthStencilResolveAttachment->attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { uint32_t layer_count = framebuffer_attachments_create_info->pAttachmentImageInfos[attachment_index].layerCount; if (layer_count <= highest_view_bit) { skip |= LogError( pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-03198", "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment info %u " "only specifies %u layers, but the view mask for subpass %u in renderPass (%s) " "includes layer %u, with that attachment specified as a depth/stencil resolve " "attachment.", 
attachment_index, layer_count, i, report_data->FormatHandle(pCreateInfo->renderPass).c_str(), highest_view_bit); } } } } } } } } if ((pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { // Verify correct attachment usage flags
for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) { const VkSubpassDescription2 &subpass_description = rpci->pSubpasses[subpass]; // Verify input attachments:
skip |= MatchUsage(subpass_description.inputAttachmentCount, subpass_description.pInputAttachments, pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00879"); // Verify color attachments:
skip |= MatchUsage(subpass_description.colorAttachmentCount, subpass_description.pColorAttachments, pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-00877"); // Verify depth/stencil attachments:
skip |= MatchUsage(1, subpass_description.pDepthStencilAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02633"); // Verify depth/stencil resolve
if (device_extensions.vk_khr_depth_stencil_resolve) { const VkSubpassDescriptionDepthStencilResolve *ds_resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_description.pNext); if (ds_resolve) { skip |= MatchUsage(1, ds_resolve->pDepthStencilResolveAttachment, pCreateInfo, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, "VUID-VkFramebufferCreateInfo-pAttachments-02634"); } } // Verify fragment shading rate attachments
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment_info = LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(subpass_description.pNext); if (fragment_shading_rate_attachment_info) { skip |= MatchUsage(1, fragment_shading_rate_attachment_info->pFragmentShadingRateAttachment, pCreateInfo, VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, "VUID-VkFramebufferCreateInfo-flags-04548"); } } } } bool b_has_non_zero_view_masks = false; for (uint32_t i = 0; i < rpci->subpassCount; ++i) { if (rpci->pSubpasses[i].viewMask != 0) { b_has_non_zero_view_masks = true; break; } } if (b_has_non_zero_view_masks && pCreateInfo->layers != 1) { skip |= LogError(pCreateInfo->renderPass, "VUID-VkFramebufferCreateInfo-renderPass-02531", "vkCreateFramebuffer(): VkFramebufferCreateInfo has %u layers but " "renderPass (%s) was specified with non-zero view masks.\n", pCreateInfo->layers, report_data->FormatHandle(pCreateInfo->renderPass).c_str()); } } } // Verify FB dimensions are within physical device limits
if (pCreateInfo->width > phys_dev_props.limits.maxFramebufferWidth) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00886", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. Requested " "width: %u, device max: %u\n", pCreateInfo->width, phys_dev_props.limits.maxFramebufferWidth); } if (pCreateInfo->height > phys_dev_props.limits.maxFramebufferHeight) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00888", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. 
Requested " "height: %u, device max: %u\n", pCreateInfo->height, phys_dev_props.limits.maxFramebufferHeight); } if (pCreateInfo->layers > phys_dev_props.limits.maxFramebufferLayers) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00890", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. Requested " "layers: %u, device max: %u\n", pCreateInfo->layers, phys_dev_props.limits.maxFramebufferLayers); } // Verify FB dimensions are greater than zero if (pCreateInfo->width <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-width-00885", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width must be greater than zero."); } if (pCreateInfo->height <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-height-00887", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height must be greater than zero."); } if (pCreateInfo->layers <= 0) { skip |= LogError(device, "VUID-VkFramebufferCreateInfo-layers-00889", "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers must be greater than zero."); } return skip; } bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) const { // TODO : Verify that renderPass FB is created with is compatible with FB bool skip = false; skip |= ValidateFramebufferCreateInfo(pCreateInfo); return skip; } static bool FindDependency(const uint32_t index, const uint32_t dependent, const std::vector<DAGNode> &subpass_to_node, layer_data::unordered_set<uint32_t> &processed_nodes) { // If we have already checked this node we have not found a dependency path so return false. if (processed_nodes.count(index)) return false; processed_nodes.insert(index); const DAGNode &node = subpass_to_node[index]; // Look for a dependency path. If one exists return true else recurse on the previous nodes. if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) { for (auto elem : node.prev) { if (FindDependency(elem, dependent, subpass_to_node, processed_nodes)) return true; } } else { return true; } return false; } bool CoreChecks::IsImageLayoutReadOnly(VkImageLayout layout) const { if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL) || (layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) { return true; } return false; } bool CoreChecks::CheckDependencyExists(const VkRenderPass renderpass, const uint32_t subpass, const VkImageLayout layout, const std::vector<SubpassLayout> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node, bool &skip) const { bool result = true; bool b_image_layout_read_only = IsImageLayoutReadOnly(layout); // Loop through all subpasses that share the same attachment and make sure a dependency exists for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) { const SubpassLayout &sp = dependent_subpasses[k]; if (subpass == sp.index) continue; if (b_image_layout_read_only && IsImageLayoutReadOnly(sp.layout)) continue; const DAGNode &node = subpass_to_node[subpass]; // Check for a specified dependency between the two nodes. If one exists we are done. 
auto prev_elem = std::find(node.prev.begin(), node.prev.end(), sp.index); auto next_elem = std::find(node.next.begin(), node.next.end(), sp.index); if (prev_elem == node.prev.end() && next_elem == node.next.end()) { // If no dependency exists, an implicit dependency still might. If not, throw an error.
layer_data::unordered_set<uint32_t> processed_nodes; if (!(FindDependency(subpass, sp.index, subpass_to_node, processed_nodes) || FindDependency(sp.index, subpass, subpass_to_node, processed_nodes))) { skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass, "A dependency between subpasses %d and %d must exist but one is not specified.", subpass, sp.index); result = false; } } } return result; } bool CoreChecks::CheckPreserved(const VkRenderPass renderpass, const VkRenderPassCreateInfo2 *pCreateInfo, const int index, const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) const { const DAGNode &node = subpass_to_node[index]; // If this node writes to the attachment, return true, as the next nodes need to preserve the attachment.
const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[index]; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { if (attachment == subpass.pColorAttachments[j].attachment) return true; } for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { if (attachment == subpass.pInputAttachments[j].attachment) return true; } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { if (attachment == subpass.pDepthStencilAttachment->attachment) return true; } bool result = false; // Loop through previous nodes and see if any of them write to the attachment.
for (auto elem : node.prev) { result |= CheckPreserved(renderpass, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip); } // If the attachment was written to by a previous node, then this node needs to preserve it.
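// Note on the recursion above: 'result' becomes true when some earlier subpass in the DAG used the
// attachment, and 'depth > 0' means the current node was reached as an intermediate subpass that
// neither reads nor writes it (the early returns above would have fired otherwise). Such a subpass
// must carry the attachment in pPreserveAttachments or its contents may be discarded. Hypothetical
// example: if subpass 0 writes attachment 2, subpass 1 ignores it, and subpass 2 reads it, then
// subpass 1 needs attachment 2 listed in its pPreserveAttachments.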
if (result && depth > 0) { bool has_preserved = false; for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { if (subpass.pPreserveAttachments[j] == attachment) { has_preserved = true; break; } } if (!has_preserved) { skip |= LogError(renderpass, kVUID_Core_DrawState_InvalidRenderpass, "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index); } } return result; } template <class T> bool IsRangeOverlapping(T offset1, T size1, T offset2, T size2) { return (((offset1 + size1) > offset2) && ((offset1 + size1) < (offset2 + size2))) || ((offset1 > offset2) && (offset1 < (offset2 + size2))); } bool IsRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) { return (IsRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) && IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount)); } bool CoreChecks::ValidateDependencies(FRAMEBUFFER_STATE const *framebuffer, RENDER_PASS_STATE const *renderPass) const { bool skip = false; auto const framebuffer_info = framebuffer->createInfo.ptr(); auto const create_info = renderPass->createInfo.ptr(); auto const &subpass_to_node = renderPass->subpassToNode; struct Attachment { std::vector<SubpassLayout> outputs; std::vector<SubpassLayout> inputs; std::vector<uint32_t> overlapping; }; std::vector<Attachment> attachments(create_info->attachmentCount); if (!(framebuffer_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) { // Find overlapping attachments for (uint32_t i = 0; i < create_info->attachmentCount; ++i) { for (uint32_t j = i + 1; j < create_info->attachmentCount; ++j) { VkImageView viewi = framebuffer_info->pAttachments[i]; VkImageView viewj = framebuffer_info->pAttachments[j]; if (viewi == viewj) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; } auto view_state_i = GetImageViewState(viewi); auto view_state_j = GetImageViewState(viewj); if (!view_state_i || !view_state_j) { continue; } auto view_ci_i = view_state_i->create_info; auto view_ci_j = view_state_j->create_info; if (view_ci_i.image == view_ci_j.image && IsRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); continue; } auto image_data_i = GetImageState(view_ci_i.image); auto image_data_j = GetImageState(view_ci_j.image); if (!image_data_i || !image_data_j) { continue; } const auto *binding_i = image_data_i->Binding(); const auto *binding_j = image_data_j->Binding(); if (binding_i && binding_j && binding_i->mem_state == binding_j->mem_state && IsRangeOverlapping(binding_i->offset, binding_i->size, binding_j->offset, binding_j->size)) { attachments[i].overlapping.emplace_back(j); attachments[j].overlapping.emplace_back(i); } } } } // Find for each attachment the subpasses that use them. 
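// The loop below builds, for every attachment, the list of subpasses that read it ('inputs') and
// write it ('outputs'), together with the layout used in each case. Attachments discovered above to
// alias the same memory share their records through 'overlapping', so a hazard on one alias counts
// as a hazard on all of them. Roughly, the bookkeeping being filled in looks like:
//
//     attachments[a].outputs = { {subpass, layout}, ... }  // written as color or depth/stencil
//     attachments[a].inputs  = { {subpass, layout}, ... }  // read as an input attachment
//
// (field names are those of the local 'Attachment' struct defined earlier in this function).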
layer_data::unordered_set<uint32_t> attachment_indices; for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; attachment_indices.clear(); for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pInputAttachments[j].layout}; attachments[attachment].inputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].inputs.emplace_back(sp); } } for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; SubpassLayout sp = {i, subpass.pColorAttachments[j].layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } attachment_indices.insert(attachment); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { uint32_t attachment = subpass.pDepthStencilAttachment->attachment; SubpassLayout sp = {i, subpass.pDepthStencilAttachment->layout}; attachments[attachment].outputs.emplace_back(sp); for (auto overlapping_attachment : attachments[attachment].overlapping) { attachments[overlapping_attachment].outputs.emplace_back(sp); } if (attachment_indices.count(attachment)) { skip |= LogError(renderPass->renderPass(), kVUID_Core_DrawState_InvalidRenderpass, "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i); } } } // If there is a dependency needed make sure one exists for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; // If the attachment is an input then all subpasses that output must have a dependency relationship for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { uint32_t attachment = subpass.pInputAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass(), i, subpass.pInputAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); } // If the attachment is an output then all subpasses that use the attachment must have a dependency relationship for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { uint32_t attachment = subpass.pColorAttachments[j].attachment; if (attachment == VK_ATTACHMENT_UNUSED) continue; CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass(), i, subpass.pColorAttachments[j].layout, attachments[attachment].inputs, subpass_to_node, skip); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) { const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment; CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout, attachments[attachment].outputs, subpass_to_node, skip); CheckDependencyExists(renderPass->renderPass(), i, subpass.pDepthStencilAttachment->layout, attachments[attachment].inputs, subpass_to_node, skip); } } // Loop through implicit dependencies, if this pass reads make sure the attachment is preserved for all passes after it 
was // written.
for (uint32_t i = 0; i < create_info->subpassCount; ++i) { const VkSubpassDescription2 &subpass = create_info->pSubpasses[i]; for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { CheckPreserved(renderPass->renderPass(), create_info, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip); } } return skip; } bool CoreChecks::ValidateRenderPassDAG(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo) const { bool skip = false; const char *vuid; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { const VkSubpassDependency2 &dependency = pCreateInfo->pDependencies[i]; auto latest_src_stage = sync_utils::GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask); auto earliest_dst_stage = sync_utils::GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask); // The first subpass here serves as a good proxy for "is multiview enabled" - since all view masks need to be non-zero if any are, which enables multiview.
if (use_rp2 && (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && (pCreateInfo->pSubpasses[0].viewMask == 0)) { skip |= LogError( device, "VUID-VkRenderPassCreateInfo2-viewMask-03059", "Dependency %u specifies the VK_DEPENDENCY_VIEW_LOCAL_BIT, but multiview is not enabled for this render pass.", i); } else if (use_rp2 && !(dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) && dependency.viewOffset != 0) { skip |= LogError(device, "VUID-VkSubpassDependency2-dependencyFlags-03092", "Dependency %u does not specify the VK_DEPENDENCY_VIEW_LOCAL_BIT, but specifies a view offset of %u.", i, dependency.viewOffset); } else if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) { if (dependency.srcSubpass == dependency.dstSubpass) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03085" : "VUID-VkSubpassDependency-srcSubpass-00865"; skip |= LogError(device, vuid, "The src and dst subpasses in dependency %u are both external.", i); } else if (dependency.dependencyFlags & VK_DEPENDENCY_VIEW_LOCAL_BIT) { if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency-dependencyFlags-02520"; } else { // dependency.dstSubpass == VK_SUBPASS_EXTERNAL
vuid = "VUID-VkSubpassDependency-dependencyFlags-02521"; } if (use_rp2) { // Create render pass 2 distinguishes between source and destination external dependencies.
if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL) { vuid = "VUID-VkSubpassDependency2-dependencyFlags-03090"; } else { vuid = "VUID-VkSubpassDependency2-dependencyFlags-03091"; } } skip |= LogError(device, vuid, "Dependency %u specifies an external dependency but also specifies VK_DEPENDENCY_VIEW_LOCAL_BIT.", i); } } else if (dependency.srcSubpass > dependency.dstSubpass) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03084" : "VUID-VkSubpassDependency-srcSubpass-00864"; skip |= LogError(device, vuid, "Dependency %u specifies a dependency from a later subpass (%u) to an earlier subpass (%u), which is " "disallowed to prevent cyclic dependencies.", i, dependency.srcSubpass, dependency.dstSubpass); } else if (dependency.srcSubpass == dependency.dstSubpass) { if (dependency.viewOffset != 0) { vuid = use_rp2 ? 
"VUID-VkSubpassDependency2-viewOffset-02530" : "VUID-VkRenderPassCreateInfo-pNext-01930"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency but has a non-zero view offset of %u", i, dependency.viewOffset); } else if ((dependency.dependencyFlags | VK_DEPENDENCY_VIEW_LOCAL_BIT) != dependency.dependencyFlags && pCreateInfo->pSubpasses[dependency.srcSubpass].viewMask > 1) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pDependencies-03060" : "VUID-VkSubpassDependency-srcSubpass-00872"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with a non-zero view mask, but does not " "specify VK_DEPENDENCY_VIEW_LOCAL_BIT.", i, dependency.srcSubpass); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) || HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask)) && (sync_utils::GetGraphicsPipelineStageLogicalOrdinal(latest_src_stage) > sync_utils::GetGraphicsPipelineStageLogicalOrdinal(earliest_dst_stage))) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-03087" : "VUID-VkSubpassDependency-srcSubpass-00867"; skip |= LogError( device, vuid, "Dependency %u specifies a self-dependency from logically-later stage (%s) to a logically-earlier stage (%s).", i, sync_utils::StringPipelineStageFlags(latest_src_stage).c_str(), sync_utils::StringPipelineStageFlags(earliest_dst_stage).c_str()); } else if ((HasNonFramebufferStagePipelineStageFlags(dependency.srcStageMask) == false) && (HasNonFramebufferStagePipelineStageFlags(dependency.dstStageMask) == false) && ((dependency.dependencyFlags & VK_DEPENDENCY_BY_REGION_BIT) == 0)) { vuid = use_rp2 ? "VUID-VkSubpassDependency2-srcSubpass-02245" : "VUID-VkSubpassDependency-srcSubpass-02243"; skip |= LogError(device, vuid, "Dependency %u specifies a self-dependency for subpass %u with both stages including a " "framebuffer-space stage, but does not specify VK_DEPENDENCY_BY_REGION_BIT in dependencyFlags.", i, dependency.srcSubpass); } } else if ((dependency.srcSubpass < dependency.dstSubpass) && ((pCreateInfo->pSubpasses[dependency.srcSubpass].flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0)) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-flags-04909" : "VUID-VkSubpassDescription-flags-03343"; skip |= LogError(device, vuid, "Dependency %u specifies that subpass %u has a dependency on a later subpass" "and includes VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM subpass flags.", i, dependency.srcSubpass); } } return skip; } bool CoreChecks::ValidateAttachmentIndex(RenderPassCreateVersion rp_version, uint32_t attachment, uint32_t attachment_count, const char *error_type, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); assert(attachment != VK_ATTACHMENT_UNUSED); if (attachment >= attachment_count) { const char *vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2-attachment-03051" : "VUID-VkRenderPassCreateInfo-attachment-00834"; skip |= LogError(device, vuid, "%s: %s attachment %d must be less than the total number of attachments %d.", function_name, error_type, attachment, attachment_count); } return skip; } enum AttachmentType { ATTACHMENT_COLOR = 1, ATTACHMENT_DEPTH = 2, ATTACHMENT_INPUT = 4, ATTACHMENT_PRESERVE = 8, ATTACHMENT_RESOLVE = 16, }; char const *StringAttachmentType(uint8_t type) { switch (type) { case ATTACHMENT_COLOR: return "color"; case ATTACHMENT_DEPTH: return "depth"; case ATTACHMENT_INPUT: return "input"; case ATTACHMENT_PRESERVE: return "preserve"; case ATTACHMENT_RESOLVE: return "resolve"; default: return "(multiple)"; } } bool CoreChecks::AddAttachmentUse(RenderPassCreateVersion rp_version, uint32_t subpass, std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts, uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) const { if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */ bool skip = false; auto &uses = attachment_uses[attachment]; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCreateRenderPass2()" : "vkCreateRenderPass()"; if (uses & new_use) { if (attachment_layouts[attachment] != new_layout) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-layout-02528" : "VUID-VkSubpassDescription-layout-02519"; skip |= LogError(device, vuid, "%s: subpass %u already uses attachment %u with a different image layout (%s vs %s).", function_name, subpass, attachment, string_VkImageLayout(attachment_layouts[attachment]), string_VkImageLayout(new_layout)); } } else if (((new_use & ATTACHMENT_COLOR) && (uses & ATTACHMENT_DEPTH)) || ((uses & ATTACHMENT_COLOR) && (new_use & ATTACHMENT_DEPTH))) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-04440" : "VUID-VkSubpassDescription-pDepthStencilAttachment-04438"; skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else if ((uses && (new_use & ATTACHMENT_PRESERVE)) || (new_use && (uses & ATTACHMENT_PRESERVE))) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pPreserveAttachments-03074" : "VUID-VkSubpassDescription-pPreserveAttachments-00854"; skip |= LogError(device, vuid, "%s: subpass %u uses attachment %u as both %s and %s attachment.", function_name, subpass, attachment, StringAttachmentType(uses), StringAttachmentType(new_use)); } else { attachment_layouts[attachment] = new_layout; uses |= new_use; } return skip; } // Handles attachment references regardless of type (input, color, depth, etc) // Input attachments have extra VUs associated with them bool CoreChecks::ValidateAttachmentReference(RenderPassCreateVersion rp_version, VkAttachmentReference2 reference, const VkFormat attachment_format, bool input, const char *error_type, const char *function_name) const { bool skip = false; // Currently all VUs require attachment to not be UNUSED assert(reference.attachment != VK_ATTACHMENT_UNUSED); // currently VkAttachmentReference and VkAttachmentReference2 have no overlapping VUs if (rp_version == RENDER_PASS_VERSION_1) { switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: skip |= LogError(device, "VUID-VkAttachmentReference-layout-00857", "%s: Layout for %s is %s but must not be " "VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR|DEPTH_ATTACHMENT_OPTIMAL|DEPTH_READ_" "ONLY_OPTIMAL|STENCIL_ATTACHMENT_OPTIMAL|STENCIL_READ_ONLY_OPTIMAL].", function_name, error_type, string_VkImageLayout(reference.layout)); break; default: break; } } else { const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(reference.pNext); switch (reference.layout) { case VK_IMAGE_LAYOUT_UNDEFINED: case VK_IMAGE_LAYOUT_PREINITIALIZED: case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: skip |= LogError(device, "VUID-VkAttachmentReference2-layout-03077", "%s: Layout for %s is %s but must not be VK_IMAGE_LAYOUT_[UNDEFINED|PREINITIALIZED|PRESENT_SRC_KHR].", function_name, error_type, string_VkImageLayout(reference.layout)); break; // Only other layouts in VUs to be checked case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: // First need to make sure feature bit is enabled and the format is actually a depth and/or stencil if (!enabled_features.core12.separateDepthStencilLayouts) { skip |= LogError(device, "VUID-VkAttachmentReference2-separateDepthStencilLayouts-03313", "%s: Layout for %s is %s but without separateDepthStencilLayouts enabled the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout)); } else if (!FormatIsDepthOrStencil(attachment_format)) { // using this over FormatIsColor() incase a multiplane and/or undef would sneak in // "color" format is still an ambiguous term in spec (internal issue #2484) skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04754", "%s: Layout for %s is %s but the attachment is a not a depth/stencil format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " 
"VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } else { if ((reference.layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL) || (reference.layout == VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL)) { if (FormatIsDepthOnly(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04756", "%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } } else { // DEPTH_ATTACHMENT_OPTIMAL || DEPTH_READ_ONLY_OPTIMAL if (FormatIsStencilOnly(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04757", "%s: Layout for %s is %s but the attachment is a depth-only format (%s) so the layout must not " "be VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } if (attachment_reference_stencil_layout) { // This check doesn't rely on the aspect mask value const VkImageLayout stencil_layout = attachment_reference_stencil_layout->stencilLayout; // clang-format off if (stencil_layout == VK_IMAGE_LAYOUT_UNDEFINED || stencil_layout == VK_IMAGE_LAYOUT_PREINITIALIZED || stencil_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL || stencil_layout == VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) { skip |= LogError(device, "VUID-VkAttachmentReferenceStencilLayout-stencilLayout-03318", "%s: In %s with pNext chain instance VkAttachmentReferenceStencilLayout, " "the stencilLayout (%s) must not be " "VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_PREINITIALIZED, " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, or " "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR.", function_name, error_type, string_VkImageLayout(stencil_layout)); } // clang-format on } else if (FormatIsDepthAndStencil(attachment_format)) { skip |= LogError( device, "VUID-VkAttachmentReference2-attachment-04755", "%s: Layout for %s is %s but the attachment is a depth and stencil format (%s) so if the layout is " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL there needs " "to be a VkAttachmentReferenceStencilLayout in the pNext chain to set the seperate stencil layout " "because the separateDepthStencilLayouts feature is enabled.", function_name, error_type, string_VkImageLayout(reference.layout), string_VkFormat(attachment_format)); } } } break; default: break; } } return skip; } bool 
CoreChecks::ValidateRenderpassAttachmentUsage(RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { VkFormat format = pCreateInfo->pAttachments[i].format; if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { if ((FormatIsColor(format) || FormatHasDepth(format)) && pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass, "%s: Render pass pAttachment[%u] has loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout == " "VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truly is undefined at the start of the " "render pass.", function_name, i); } if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_Core_DrawState_InvalidRenderpass, "%s: Render pass pAttachment[%u] has stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD and initialLayout " "== VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you intended. Consider using " "VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the image truly is undefined at the start of the " "render pass.", function_name, i); } } } // Track when we're observing the first use of an attachment
std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true); for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; std::vector<uint8_t> attachment_uses(pCreateInfo->attachmentCount); std::vector<VkImageLayout> attachment_layouts(pCreateInfo->attachmentCount); // Track if attachments are used as input as well as another type
layer_data::unordered_set<uint32_t> input_attachments; if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pipelineBindPoint-03062" : "VUID-VkSubpassDescription-pipelineBindPoint-00844"; skip |= LogError(device, vuid, "%s: Pipeline bind point for pSubpasses[%d] must be VK_PIPELINE_BIND_POINT_GRAPHICS.", function_name, i); } // Check input attachments first
// - so we can detect first-use-as-input for VU #00349
// - if other color or depth/stencil is also input, it limits valid layouts
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) { auto const &attachment_ref = subpass.pInputAttachments[j]; const uint32_t attachment_index = attachment_ref.attachment; const VkImageAspectFlags aspect_mask = attachment_ref.aspectMask; if (attachment_index != VK_ATTACHMENT_UNUSED) { input_attachments.insert(attachment_index); std::string error_type = "pSubpasses[" + std::to_string(i) + "].pInputAttachments[" + std::to_string(j) + "]"; skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-attachment-02801" : "VUID-VkInputAttachmentAspectReference-aspectMask-01964"; skip |= LogError( device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes VK_IMAGE_ASPECT_METADATA_BIT.", function_name, j, i); } else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-attachment-04563" : "VUID-VkInputAttachmentAspectReference-aspectMask-02250"; skip |= LogError(device, vuid, "%s: Aspect mask for input attachment reference %d in subpass %d includes " "VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit.", function_name, j, i); } // safe to dereference pCreateInfo->pAttachments[] if (attachment_index < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, true, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_INPUT, attachment_ref.layout); vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-attachment-02525" : "VUID-VkRenderPassCreateInfo-pNext-01963"; skip |= ValidateImageAspectMask(VK_NULL_HANDLE, attachment_format, aspect_mask, function_name, vuid); if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pInputAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); bool used_as_depth = (subpass.pDepthStencilAttachment != NULL && subpass.pDepthStencilAttachment->attachment == attachment_index); bool used_as_color = false; for (uint32_t k = 0; !used_as_depth && !used_as_color && k < subpass.colorAttachmentCount; ++k) { used_as_color = (subpass.pColorAttachments[k].attachment == attachment_index); } if (!used_as_depth && !used_as_color && pCreateInfo->pAttachments[attachment_index].loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-loadOp-03064" : "VUID-VkSubpassDescription-loadOp-00846"; skip |= LogError(device, vuid, "%s: attachment %u is first used as an input attachment in %s with loadOp set to " "VK_ATTACHMENT_LOAD_OP_CLEAR.", function_name, attachment_index, error_type.c_str()); } } attach_first_use[attachment_index] = false; const VkFormatFeatureFlags valid_flags = VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & valid_flags) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pInputAttachments-02897" : "VUID-VkSubpassDescription-pInputAttachments-02647"; skip |= LogError(device, vuid, "%s: Input attachment %s format (%s) does not contain VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT " "| VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } } if (rp_version == RENDER_PASS_VERSION_2) { // These are validated automatically as part of parameter validation for create renderpass 1 // as they are in a struct that only applies to input attachments - not so for v2. 
// Check for 0
if (aspect_mask == 0) { skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02800", "%s: Input attachment %s aspect mask must not be 0.", function_name, error_type.c_str()); } else { const VkImageAspectFlags valid_bits = (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_METADATA_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT); // Check for valid aspect mask bits
if (aspect_mask & ~valid_bits) { skip |= LogError(device, "VUID-VkSubpassDescription2-attachment-02799", "%s: Input attachment %s aspect mask (0x%" PRIx32 ") is invalid.", function_name, error_type.c_str(), aspect_mask); } } } // Validate layout
vuid = use_rp2 ? "VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (attachment_ref.layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_GENERAL: case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR: break; // valid layouts
default: skip |= LogError(device, vuid, "%s: %s layout is %s but input attachments must be " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL, or " "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); break; } } } for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pPreserveAttachments[" + std::to_string(j) + "]"; uint32_t attachment = subpass.pPreserveAttachments[j]; if (attachment == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-attachment-03073" : "VUID-VkSubpassDescription-attachment-00853"; skip |= LogError(device, vuid, "%s: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED.", function_name, j); } else { skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); if (attachment < pCreateInfo->attachmentCount) { skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_PRESERVE, VkImageLayout(0) /* preserve doesn't have any layout */); } } } bool subpass_performs_resolve = false; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { if (subpass.pResolveAttachments) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pResolveAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pResolveAttachments[j]; if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_ref.attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment_ref.attachment < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_ref.attachment].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_ref.attachment, ATTACHMENT_RESOLVE, attachment_ref.layout); subpass_performs_resolve = true; if (pCreateInfo->pAttachments[attachment_ref.attachment].samples != VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03067" : "VUID-VkSubpassDescription-pResolveAttachments-00849"; skip |= LogError( device, vuid, "%s: Subpass %u requests multisample resolve into attachment %u, which must " "have VK_SAMPLE_COUNT_1_BIT but has %s.", function_name, i, attachment_ref.attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment_ref.attachment].samples)); } const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-02899" : "VUID-VkSubpassDescription-pResolveAttachments-02649"; skip |= LogError(device, vuid, "%s: Resolve attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } // VK_QCOM_render_pass_shader_resolve check of resolve attachmnents if ((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) { vuid = use_rp2 ? 
"VUID-VkRenderPassCreateInfo2-flags-04907" : "VUID-VkSubpassDescription-flags-03341"; skip |= LogError( device, vuid, "%s: Subpass %u enables shader resolve, which requires every element of pResolve attachments" " must be VK_ATTACHMENT_UNUSED, but element %u contains a reference to attachment %u instead.", function_name, i, j, attachment_ref.attachment); } } } } } if (subpass.pDepthStencilAttachment) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pDepthStencilAttachment"; const uint32_t attachment = subpass.pDepthStencilAttachment->attachment; const VkImageLayout image_layout = subpass.pDepthStencilAttachment->layout; if (attachment != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment].format; skip |= ValidateAttachmentReference(rp_version, *subpass.pDepthStencilAttachment, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment, ATTACHMENT_DEPTH, image_layout); if (attach_first_use[attachment]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, image_layout, attachment, pCreateInfo->pAttachments[attachment]); } attach_first_use[attachment] = false; const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-02900" : "VUID-VkSubpassDescription-pDepthStencilAttachment-02650"; skip |= LogError(device, vuid, "%s: Depth Stencil %s format (%s) does not contain " "VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (image_layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL: case VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment) != input_attachments.end()) { skip |= LogError( device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR " "or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but depth/stencil attachments must be " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, " "VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " "VK_IMAGE_LAYOUT_GENERAL, " "VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR or" "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(image_layout)); break; } } } uint32_t last_sample_count_attachment = VK_ATTACHMENT_UNUSED; for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) { std::string error_type = "pSubpasses[" + std::to_string(i) + "].pColorAttachments[" + std::to_string(j) + "]"; auto const &attachment_ref = subpass.pColorAttachments[j]; const uint32_t attachment_index = attachment_ref.attachment; if (attachment_index != VK_ATTACHMENT_UNUSED) { skip |= ValidateAttachmentIndex(rp_version, attachment_index, pCreateInfo->attachmentCount, error_type.c_str(), function_name); // safe to dereference pCreateInfo->pAttachments[] if (attachment_index < pCreateInfo->attachmentCount) { const VkFormat attachment_format = pCreateInfo->pAttachments[attachment_index].format; skip |= ValidateAttachmentReference(rp_version, attachment_ref, attachment_format, false, error_type.c_str(), function_name); skip |= AddAttachmentUse(rp_version, i, attachment_uses, attachment_layouts, attachment_index, ATTACHMENT_COLOR, attachment_ref.layout); VkSampleCountFlagBits current_sample_count = pCreateInfo->pAttachments[attachment_index].samples; if (last_sample_count_attachment != VK_ATTACHMENT_UNUSED) { VkSampleCountFlagBits last_sample_count = pCreateInfo->pAttachments[subpass.pColorAttachments[last_sample_count_attachment].attachment].samples; if (current_sample_count != last_sample_count) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-pColorAttachments-03069" : "VUID-VkSubpassDescription-pColorAttachments-01417"; skip |= LogError( device, vuid, "%s: Subpass %u attempts to render to color attachments with inconsistent sample counts." "Color attachment ref %u has sample count %s, whereas previous color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(current_sample_count), last_sample_count_attachment, string_VkSampleCountFlagBits(last_sample_count)); } } last_sample_count_attachment = j; if (subpass_performs_resolve && current_sample_count == VK_SAMPLE_COUNT_1_BIT) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03066" : "VUID-VkSubpassDescription-pResolveAttachments-00848"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "VK_SAMPLE_COUNT_1_BIT.", function_name, i, attachment_index); } if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount) { const auto depth_stencil_sample_count = pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples; if (device_extensions.vk_amd_mixed_attachment_samples) { if (current_sample_count > depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-03070" : "VUID-VkSubpassDescription-pColorAttachments-01506"; skip |= LogError(device, vuid, "%s: %s has %s which is larger than depth/stencil attachment %s.", function_name, error_type.c_str(), string_VkSampleCountFlagBits(current_sample_count), string_VkSampleCountFlagBits(depth_stencil_sample_count)); break; } } if (!device_extensions.vk_amd_mixed_attachment_samples && !device_extensions.vk_nv_framebuffer_mixed_samples && current_sample_count != depth_stencil_sample_count) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pDepthStencilAttachment-03071" : "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"; skip |= LogError(device, vuid, "%s: Subpass %u attempts to render to use a depth/stencil attachment with sample " "count that differs " "from color attachment %u." "The depth attachment ref has sample count %s, whereas color attachment ref %u has " "sample count %s.", function_name, i, j, string_VkSampleCountFlagBits(depth_stencil_sample_count), j, string_VkSampleCountFlagBits(current_sample_count)); break; } } const VkFormatFeatureFlags format_features = GetPotentialFormatFeatures(attachment_format); if ((format_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) == 0) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pColorAttachments-02898" : "VUID-VkSubpassDescription-pColorAttachments-02648"; skip |= LogError(device, vuid, "%s: Color attachment %s format (%s) does not contain " "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT.", function_name, error_type.c_str(), string_VkFormat(attachment_format)); } if (attach_first_use[attachment_index]) { skip |= ValidateLayoutVsAttachmentDescription(report_data, rp_version, subpass.pColorAttachments[j].layout, attachment_index, pCreateInfo->pAttachments[attachment_index]); } attach_first_use[attachment_index] = false; } // Check for valid imageLayout vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-None-04439" : "VUID-VkSubpassDescription-None-04437"; switch (attachment_ref.layout) { case VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR: case VK_IMAGE_LAYOUT_GENERAL: break; // valid layouts case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR: if (input_attachments.find(attachment_index) != input_attachments.end()) { skip |= LogError(device, vuid, "%s: %s is also an input attachment so the layout (%s) must not be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL or VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); } break; default: skip |= LogError(device, vuid, "%s: %s layout is %s but color attachments must be " "VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, " "VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR, " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or " "VK_IMAGE_LAYOUT_GENERAL.", function_name, error_type.c_str(), string_VkImageLayout(attachment_ref.layout)); break; } } if (subpass_performs_resolve && subpass.pResolveAttachments[j].attachment != VK_ATTACHMENT_UNUSED && subpass.pResolveAttachments[j].attachment < pCreateInfo->attachmentCount) { if (attachment_index == VK_ATTACHMENT_UNUSED) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03065" : "VUID-VkSubpassDescription-pResolveAttachments-00847"; skip |= LogError(device, vuid, "%s: Subpass %u requests multisample resolve from attachment %u which has " "attachment=VK_ATTACHMENT_UNUSED.", function_name, i, attachment_index); } else { const auto &color_desc = pCreateInfo->pAttachments[attachment_index]; const auto &resolve_desc = pCreateInfo->pAttachments[subpass.pResolveAttachments[j].attachment]; if (color_desc.format != resolve_desc.format) { vuid = use_rp2 ? "VUID-VkSubpassDescription2-pResolveAttachments-03068" : "VUID-VkSubpassDescription-pResolveAttachments-00850"; skip |= LogError(device, vuid, "%s: %s resolves to an attachment with a " "different format. color format: %u, resolve format: %u.", function_name, error_type.c_str(), color_desc.format, resolve_desc.format); } } } } } return skip; } bool CoreChecks::ValidateCreateRenderPass(VkDevice device, RenderPassCreateVersion rp_version, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; skip |= ValidateRenderpassAttachmentUsage(rp_version, pCreateInfo, function_name); skip |= ValidateRenderPassDAG(rp_version, pCreateInfo); // Validate multiview correlation and view masks bool view_mask_zero = false; bool view_mask_non_zero = false; for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) { const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i]; if (subpass.viewMask != 0) { view_mask_non_zero = true; } else { view_mask_zero = true; } if ((subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX) != 0 && (subpass.flags & VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX) == 0) { vuid = use_rp2 ? 
"VUID-VkSubpassDescription2-flags-03076" : "VUID-VkSubpassDescription-flags-00856"; skip |= LogError(device, vuid, "%s: The flags parameter of subpass description %u includes " "VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX but does not also include " "VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.", function_name, i); } } if (rp_version == RENDER_PASS_VERSION_2) { if (view_mask_non_zero && view_mask_zero) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03058", "%s: Some view masks are non-zero whilst others are zero.", function_name); } if (view_mask_zero && pCreateInfo->correlatedViewMaskCount != 0) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-viewMask-03057", "%s: Multiview is not enabled but correlation masks are still provided", function_name); } } uint32_t aggregated_cvms = 0; for (uint32_t i = 0; i < pCreateInfo->correlatedViewMaskCount; ++i) { if (aggregated_cvms & pCreateInfo->pCorrelatedViewMasks[i]) { vuid = use_rp2 ? "VUID-VkRenderPassCreateInfo2-pCorrelatedViewMasks-03056" : "VUID-VkRenderPassMultiviewCreateInfo-pCorrelationMasks-00841"; skip |= LogError(device, vuid, "%s: pCorrelatedViewMasks[%u] contains a previously appearing view bit.", function_name, i); } aggregated_cvms |= pCreateInfo->pCorrelatedViewMasks[i]; } LogObjectList objects(device); auto func_name = use_rp2 ? Func::vkCreateRenderPass2 : Func::vkCreateRenderPass; auto structure = use_rp2 ? Struct::VkSubpassDependency2 : Struct::VkSubpassDependency; for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) { auto const &dependency = pCreateInfo->pDependencies[i]; Location loc(func_name, structure, Field::pDependencies, i); skip |= ValidateSubpassDependency(objects, loc, dependency); } return skip; } bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const { bool skip = false; // Handle extension structs from KHR_multiview and KHR_maintenance2 that can only be validated for RP1 (indices out of bounds) const VkRenderPassMultiviewCreateInfo *multiview_info = LvlFindInChain<VkRenderPassMultiviewCreateInfo>(pCreateInfo->pNext); if (multiview_info) { if (multiview_info->subpassCount && multiview_info->subpassCount != pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01928", "vkCreateRenderPass(): Subpass count is %u but multiview info has a subpass count of %u.", pCreateInfo->subpassCount, multiview_info->subpassCount); } else if (multiview_info->dependencyCount && multiview_info->dependencyCount != pCreateInfo->dependencyCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01929", "vkCreateRenderPass(): Dependency count is %u but multiview info has a dependency count of %u.", pCreateInfo->dependencyCount, multiview_info->dependencyCount); } } const VkRenderPassInputAttachmentAspectCreateInfo *input_attachment_aspect_info = LvlFindInChain<VkRenderPassInputAttachmentAspectCreateInfo>(pCreateInfo->pNext); if (input_attachment_aspect_info) { for (uint32_t i = 0; i < input_attachment_aspect_info->aspectReferenceCount; ++i) { uint32_t subpass = input_attachment_aspect_info->pAspectReferences[i].subpass; uint32_t attachment = input_attachment_aspect_info->pAspectReferences[i].inputAttachmentIndex; if (subpass >= pCreateInfo->subpassCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01926", "vkCreateRenderPass(): Subpass index %u specified by input attachment aspect info %u is 
greater " "than the subpass " "count of %u for this render pass.", subpass, i, pCreateInfo->subpassCount); } else if (pCreateInfo->pSubpasses && attachment >= pCreateInfo->pSubpasses[subpass].inputAttachmentCount) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pNext-01927", "vkCreateRenderPass(): Input attachment index %u specified by input attachment aspect info %u is " "greater than the " "input attachment count of %u for this subpass.", attachment, i, pCreateInfo->pSubpasses[subpass].inputAttachmentCount); } } } const VkRenderPassFragmentDensityMapCreateInfoEXT *fragment_density_map_info = LvlFindInChain<VkRenderPassFragmentDensityMapCreateInfoEXT>(pCreateInfo->pNext); if (fragment_density_map_info) { if (fragment_density_map_info->fragmentDensityMapAttachment.attachment != VK_ATTACHMENT_UNUSED) { if (fragment_density_map_info->fragmentDensityMapAttachment.attachment >= pCreateInfo->attachmentCount) { skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02547", "vkCreateRenderPass(): fragmentDensityMapAttachment %u must be less than attachmentCount %u of " "for this render pass.", fragment_density_map_info->fragmentDensityMapAttachment.attachment, pCreateInfo->attachmentCount); } else { if (!(fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT || fragment_density_map_info->fragmentDensityMapAttachment.layout == VK_IMAGE_LAYOUT_GENERAL)) { skip |= LogError(device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02549", "vkCreateRenderPass(): Layout of fragmentDensityMapAttachment %u' must be equal to " "VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT, or VK_IMAGE_LAYOUT_GENERAL.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } if (!(pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD || pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].loadOp == VK_ATTACHMENT_LOAD_OP_DONT_CARE)) { skip |= LogError( device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02550", "vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a loadOp " "equal to VK_ATTACHMENT_LOAD_OP_LOAD or VK_ATTACHMENT_LOAD_OP_DONT_CARE.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } if (pCreateInfo->pAttachments[fragment_density_map_info->fragmentDensityMapAttachment.attachment].storeOp != VK_ATTACHMENT_STORE_OP_DONT_CARE) { skip |= LogError( device, "VUID-VkRenderPassFragmentDensityMapCreateInfoEXT-fragmentDensityMapAttachment-02551", "vkCreateRenderPass(): FragmentDensityMapAttachment %u' must reference an attachment with a storeOp " "equal to VK_ATTACHMENT_STORE_OP_DONT_CARE.", fragment_density_map_info->fragmentDensityMapAttachment.attachment); } } } } if (!skip) { safe_VkRenderPassCreateInfo2 create_info_2; ConvertVkRenderPassCreateInfoToV2KHR(*pCreateInfo, &create_info_2); skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_1, create_info_2.ptr(), "vkCreateRenderPass()"); } return skip; } bool CoreChecks::ValidateDepthStencilResolve(const VkPhysicalDeviceVulkan12Properties &core12_props, const VkRenderPassCreateInfo2 *pCreateInfo, const char *function_name) const { bool skip = false; // If the pNext list of VkSubpassDescription2 includes a VkSubpassDescriptionDepthStencilResolve structure, // then that structure describes 
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; i++) {
        const VkSubpassDescription2 &subpass = pCreateInfo->pSubpasses[i];
        const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass.pNext);
        if (resolve == nullptr) {
            continue;
        }

        const bool resolve_attachment_not_unused = (resolve->pDepthStencilResolveAttachment != nullptr &&
                                                    resolve->pDepthStencilResolveAttachment->attachment != VK_ATTACHMENT_UNUSED);
        const bool valid_resolve_attachment_index =
            (resolve_attachment_not_unused && resolve->pDepthStencilResolveAttachment->attachment < pCreateInfo->attachmentCount);

        const bool ds_attachment_not_unused =
            (subpass.pDepthStencilAttachment != nullptr && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED);
        const bool valid_ds_attachment_index =
            (ds_attachment_not_unused && subpass.pDepthStencilAttachment->attachment < pCreateInfo->attachmentCount);

        if (resolve_attachment_not_unused && subpass.pDepthStencilAttachment != nullptr &&
            subpass.pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03177",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u, but pDepthStencilAttachment=VK_ATTACHMENT_UNUSED.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (resolve_attachment_not_unused && resolve->depthResolveMode == VK_RESOLVE_MODE_NONE &&
            resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03178",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u, but both depth and stencil resolve modes are "
                             "VK_RESOLVE_MODE_NONE.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (resolve_attachment_not_unused && valid_ds_attachment_index &&
            pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(
                device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03179",
                "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                "structure with resolve attachment %u. However, pDepthStencilAttachment has sample count=VK_SAMPLE_COUNT_1_BIT.",
                function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        if (valid_resolve_attachment_index &&
            pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03180",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with resolve attachment %u which has sample count=VK_SAMPLE_COUNT_1_BIT.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }

        VkFormat depth_stencil_attachment_format =
            (valid_ds_attachment_index ? pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].format
                                       : VK_FORMAT_UNDEFINED);
        VkFormat depth_stencil_resolve_attachment_format =
            (valid_resolve_attachment_index
                 ? pCreateInfo->pAttachments[resolve->pDepthStencilResolveAttachment->attachment].format
                 : VK_FORMAT_UNDEFINED);

        if (valid_ds_attachment_index && valid_resolve_attachment_index) {
            const auto resolve_depth_size = FormatDepthSize(depth_stencil_resolve_attachment_format);
            const auto resolve_stencil_size = FormatStencilSize(depth_stencil_resolve_attachment_format);

            if (resolve_depth_size > 0 &&
                ((FormatDepthSize(depth_stencil_attachment_format) != resolve_depth_size) ||
                 (FormatDepthNumericalType(depth_stencil_attachment_format) !=
                  FormatDepthNumericalType(depth_stencil_resolve_attachment_format)))) {
                skip |= LogError(
                    device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03181",
                    "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                    "structure with resolve attachment %u which has a depth component (size %u). The depth component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_depth_size,
                    FormatDepthSize(depth_stencil_attachment_format));
            }

            if (resolve_stencil_size > 0 &&
                ((FormatStencilSize(depth_stencil_attachment_format) != resolve_stencil_size) ||
                 (FormatStencilNumericalType(depth_stencil_attachment_format) !=
                  FormatStencilNumericalType(depth_stencil_resolve_attachment_format)))) {
                skip |= LogError(
                    device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03182",
                    "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                    "structure with resolve attachment %u which has a stencil component (size %u). The stencil component "
                    "of pDepthStencilAttachment must have the same number of bits (currently %u) and the same numerical type.",
                    function_name, i, resolve->pDepthStencilResolveAttachment->attachment, resolve_stencil_size,
                    FormatStencilSize(depth_stencil_attachment_format));
            }
        }

        if (!(resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->depthResolveMode & core12_props.supportedDepthResolveModes)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-depthResolveMode-03183",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with invalid depthResolveMode=%u.",
                             function_name, i, resolve->depthResolveMode);
        }

        if (!(resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->stencilResolveMode & core12_props.supportedStencilResolveModes)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-stencilResolveMode-03184",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure with invalid stencilResolveMode=%u.",
                             function_name, i, resolve->stencilResolveMode);
        }

        if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
            core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_FALSE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03185",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical.",
                             function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode);
        }

        if (valid_resolve_attachment_index && FormatIsDepthAndStencil(depth_stencil_resolve_attachment_format) &&
            core12_props.independentResolve == VK_FALSE && core12_props.independentResolveNone == VK_TRUE &&
            !(resolve->depthResolveMode == resolve->stencilResolveMode || resolve->depthResolveMode == VK_RESOLVE_MODE_NONE ||
              resolve->stencilResolveMode == VK_RESOLVE_MODE_NONE)) {
            skip |= LogError(device, "VUID-VkSubpassDescriptionDepthStencilResolve-pDepthStencilResolveAttachment-03186",
                             "%s: Subpass %u includes a VkSubpassDescriptionDepthStencilResolve "
                             "structure. The values of depthResolveMode (%u) and stencilResolveMode (%u) must be identical, or "
                             "one of them must be %u.",
                             function_name, i, resolve->depthResolveMode, resolve->stencilResolveMode, VK_RESOLVE_MODE_NONE);
        }

        // VK_QCOM_render_pass_shader_resolve check of depth/stencil attachment
        if (((subpass.flags & VK_SUBPASS_DESCRIPTION_SHADER_RESOLVE_BIT_QCOM) != 0) && (resolve_attachment_not_unused)) {
            skip |= LogError(device, "VUID-VkSubpassDescription-flags-03342",
                             "%s: Subpass %u enables shader resolve, which requires the depth/stencil resolve attachment"
                             " to be VK_ATTACHMENT_UNUSED, but a reference to attachment %u was found instead.",
                             function_name, i, resolve->pDepthStencilResolveAttachment->attachment);
        }
    }

    return skip;
}

bool CoreChecks::ValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                           const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
                                           const char *function_name) const {
    bool skip = false;

    if (device_extensions.vk_khr_depth_stencil_resolve) {
        skip |= ValidateDepthStencilResolve(phys_dev_props_core12, pCreateInfo, function_name);
    }

    skip |= ValidateFragmentShadingRateAttachments(device, pCreateInfo);

    safe_VkRenderPassCreateInfo2 create_info_2(pCreateInfo);
    skip |= ValidateCreateRenderPass(device, RENDER_PASS_VERSION_2, create_info_2.ptr(), function_name);

    return skip;
}

bool CoreChecks::ValidateFragmentShadingRateAttachments(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo) const {
    bool skip = false;

    if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
        for (uint32_t attachment_description = 0; attachment_description < pCreateInfo->attachmentCount;
             ++attachment_description) {
            std::vector<uint32_t> used_as_fragment_shading_rate_attachment;

            // Prepass to find any uses of this attachment as a fragment shading rate attachment and validate them independently
            for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
                const VkFragmentShadingRateAttachmentInfoKHR *fragment_shading_rate_attachment =
                    LvlFindInChain<VkFragmentShadingRateAttachmentInfoKHR>(pCreateInfo->pSubpasses[subpass].pNext);

                if (fragment_shading_rate_attachment && fragment_shading_rate_attachment->pFragmentShadingRateAttachment) {
                    const VkAttachmentReference2 &attachment_reference =
                        *(fragment_shading_rate_attachment->pFragmentShadingRateAttachment);
                    if (attachment_reference.attachment == attachment_description) {
                        used_as_fragment_shading_rate_attachment.push_back(subpass);
                    }

                    if (((pCreateInfo->flags & VK_RENDER_PASS_CREATE_TRANSFORM_BIT_QCOM) != 0) &&
                        (attachment_reference.attachment != VK_ATTACHMENT_UNUSED)) {
                        skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-flags-04521",
specified in subpass %u.", subpass); } if (attachment_reference.attachment != VK_ATTACHMENT_UNUSED) { const VkFormatFeatureFlags potential_format_features = GetPotentialFormatFeatures(pCreateInfo->pAttachments[attachment_reference.attachment].format); if (!(potential_format_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) { skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-04586", "vkCreateRenderPass2: Attachment description %u is used in subpass %u as a fragment " "shading rate attachment, but specifies format %s, which does not support " "VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.", attachment_reference.attachment, subpass, string_VkFormat(pCreateInfo->pAttachments[attachment_reference.attachment].format)); } if (attachment_reference.layout != VK_IMAGE_LAYOUT_GENERAL && attachment_reference.layout != VK_IMAGE_LAYOUT_FRAGMENT_SHADING_RATE_ATTACHMENT_OPTIMAL_KHR) { skip |= LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04524", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u specifies a layout of %s.", subpass, string_VkImageLayout(attachment_reference.layout)); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04525", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel width of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04526", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is lower than the advertised minimum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.width); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width > phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04527", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel width of %u which " "is higher than the advertised maximum width %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width, phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.width); } if (!IsPowerOfTwo(fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height)) { skip |= LogError(device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04528", "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a " "non-power-of-two texel height of %u.", subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height); } if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height < phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height) { LogError( device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04529", "vkCreateRenderPass2: Fragment shading rate 
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is lower than the advertised minimum height %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
                                phys_dev_ext_props.fragment_shading_rate_props.minFragmentShadingRateAttachmentTexelSize.height);
                        }
                        if (fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04530",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel height of %u "
                                "which is higher than the advertised maximum height %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height,
                                phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSize.height);
                        }
                        uint32_t aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width /
                                                fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height;
                        uint32_t inverse_aspect_ratio = fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height /
                                                        fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width;
                        if (aspect_ratio >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04531",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by "
                                "%u, which has an aspect ratio of %u, which is higher than the advertised maximum aspect "
                                "ratio %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, aspect_ratio,
                                phys_dev_ext_props.fragment_shading_rate_props
                                    .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                        }
                        if (inverse_aspect_ratio >
                            phys_dev_ext_props.fragment_shading_rate_props.maxFragmentShadingRateAttachmentTexelSizeAspectRatio) {
                            skip |= LogError(
                                device, "VUID-VkFragmentShadingRateAttachmentInfoKHR-pFragmentShadingRateAttachment-04532",
                                "vkCreateRenderPass2: Fragment shading rate attachment in subpass %u has a texel size of %u by "
                                "%u, which has an inverse aspect ratio of %u, which is higher than the advertised maximum "
                                "aspect ratio %u.",
                                subpass, fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.width,
                                fragment_shading_rate_attachment->shadingRateAttachmentTexelSize.height, inverse_aspect_ratio,
                                phys_dev_ext_props.fragment_shading_rate_props
                                    .maxFragmentShadingRateAttachmentTexelSizeAspectRatio);
                        }
                    }
                }
            }

            // Lambda function turning a vector of integers into a comma-separated string (e.g. "0, 1, and 2")
            auto vector_to_string = [&](std::vector<uint32_t> vector) {
                std::stringstream ss;
                size_t size = vector.size();
                for (size_t i = 0; i < size; i++) {
                    if (size == 2 && i == 1) {
                        ss << " and ";
                    } else if (size > 2 && i == size - 1) {
                        ss << ", and ";
                    } else if (i != 0) {
                        ss << ", ";
                    }
                    ss << vector[i];
                }
                return ss.str();
            };

            // Search for other uses of the same attachment
            if (!used_as_fragment_shading_rate_attachment.empty()) {
                for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
                    const VkSubpassDescription2 &subpass_info = pCreateInfo->pSubpasses[subpass];
                    const VkSubpassDescriptionDepthStencilResolve *depth_stencil_resolve_attachment =
                        LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(subpass_info.pNext);
                    std::string fsr_attachment_subpasses_string = vector_to_string(used_as_fragment_shading_rate_attachment);

                    for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
                        if (subpass_info.pColorAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment "
                                "in subpass(es) %s but also as color attachment %u in subpass %u.",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    for (uint32_t attachment = 0; attachment < subpass_info.colorAttachmentCount; ++attachment) {
                        if (subpass_info.pResolveAttachments &&
                            subpass_info.pResolveAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment "
                                "in subpass(es) %s but also as color resolve attachment %u in subpass %u.",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    for (uint32_t attachment = 0; attachment < subpass_info.inputAttachmentCount; ++attachment) {
                        if (subpass_info.pInputAttachments[attachment].attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment "
                                "in subpass(es) %s but also as input attachment %u in subpass %u.",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), attachment, subpass);
                        }
                    }
                    if (subpass_info.pDepthStencilAttachment) {
                        if (subpass_info.pDepthStencilAttachment->attachment == attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment "
                                "in subpass(es) %s but also as the depth/stencil attachment in subpass %u.",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
                        }
                    }
                    if (depth_stencil_resolve_attachment && depth_stencil_resolve_attachment->pDepthStencilResolveAttachment) {
                        if (depth_stencil_resolve_attachment->pDepthStencilResolveAttachment->attachment ==
                            attachment_description) {
                            skip |= LogError(
                                device, "VUID-VkRenderPassCreateInfo2-pAttachments-04585",
                                "vkCreateRenderPass2: Attachment description %u is used as a fragment shading rate attachment "
                                "in subpass(es) %s but also as the depth/stencil resolve attachment in subpass %u.",
                                attachment_description, fsr_attachment_subpasses_string.c_str(), subpass);
                        }
                    }
                }
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
    return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2KHR()");
}

bool CoreChecks::PreCallValidateCreateRenderPass2(VkDevice device, const VkRenderPassCreateInfo2 *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) const {
    return ValidateCreateRenderPass2(device, pCreateInfo, pAllocator, pRenderPass, "vkCreateRenderPass2()");
}

bool CoreChecks::ValidatePrimaryCommandBuffer(const CMD_BUFFER_STATE *pCB, char const *cmd_name, const char *error_code) const {
    bool skip = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
error_code, "Cannot execute command %s on a secondary command buffer.", cmd_name); } return skip; } bool CoreChecks::VerifyRenderAreaBounds(const VkRenderPassBeginInfo *pRenderPassBegin) const { bool skip = false; const safe_VkFramebufferCreateInfo *framebuffer_info = &GetFramebufferState(pRenderPassBegin->framebuffer)->createInfo; if (pRenderPassBegin->renderArea.offset.x < 0 || (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > framebuffer_info->width || pRenderPassBegin->renderArea.offset.y < 0 || (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > framebuffer_info->height) { skip |= static_cast<bool>(LogError( pRenderPassBegin->renderPass, kVUID_Core_DrawState_InvalidRenderArea, "Cannot execute a render pass with renderArea not within the bound of the framebuffer. RenderArea: x %d, y %d, width " "%d, height %d. Framebuffer: width %d, height %d.", pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width, pRenderPassBegin->renderArea.extent.height, framebuffer_info->width, framebuffer_info->height)); } return skip; } bool CoreChecks::VerifyFramebufferAndRenderPassImageViews(const VkRenderPassBeginInfo *pRenderPassBeginInfo, const char *func_name) const { bool skip = false; const VkRenderPassAttachmentBeginInfo *render_pass_attachment_begin_info = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBeginInfo->pNext); if (render_pass_attachment_begin_info && render_pass_attachment_begin_info->attachmentCount != 0) { const safe_VkFramebufferCreateInfo *framebuffer_create_info = &GetFramebufferState(pRenderPassBeginInfo->framebuffer)->createInfo; const VkFramebufferAttachmentsCreateInfo *framebuffer_attachments_create_info = LvlFindInChain<VkFramebufferAttachmentsCreateInfo>(framebuffer_create_info->pNext); if ((framebuffer_create_info->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) == 0) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03207", "%s: Image views specified at render pass begin, but framebuffer not created with " "VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT", func_name); } else if (framebuffer_attachments_create_info) { if (framebuffer_attachments_create_info->attachmentImageInfoCount != render_pass_attachment_begin_info->attachmentCount) { skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03208", "%s: %u image views specified at render pass begin, but framebuffer " "created expecting %u attachments", func_name, render_pass_attachment_begin_info->attachmentCount, framebuffer_attachments_create_info->attachmentImageInfoCount); } else { const safe_VkRenderPassCreateInfo2 *render_pass_create_info = &GetRenderPassState(pRenderPassBeginInfo->renderPass)->createInfo; for (uint32_t i = 0; i < render_pass_attachment_begin_info->attachmentCount; ++i) { const auto image_view_state = GetImageViewState(render_pass_attachment_begin_info->pAttachments[i]); const VkImageViewCreateInfo *image_view_create_info = &image_view_state->create_info; const auto &subresource_range = image_view_state->normalized_subresource_range; const VkFramebufferAttachmentImageInfo *framebuffer_attachment_image_info = &framebuffer_attachments_create_info->pAttachmentImageInfos[i]; const VkImageCreateInfo *image_create_info = &GetImageState(image_view_create_info->image)->createInfo; if (framebuffer_attachment_image_info->flags != image_create_info->flags) { skip |= 
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03209",
                                         "%s: Image view #%u created from an image with flags set as 0x%X, "
                                         "but image info #%u used to create the framebuffer had flags set as 0x%X",
                                         func_name, i, image_create_info->flags, i, framebuffer_attachment_image_info->flags);
                    }

                    if (framebuffer_attachment_image_info->usage != image_view_state->inherited_usage) {
                        // Give clearer message if this error is due to the "inherited" part or not
                        if (image_create_info->usage == image_view_state->inherited_usage) {
                            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
                                             "%s: Image view #%u created from an image with usage set as 0x%X, "
                                             "but image info #%u used to create the framebuffer had usage set as 0x%X",
                                             func_name, i, image_create_info->usage, i,
                                             framebuffer_attachment_image_info->usage);
                        } else {
                            skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-04627",
                                             "%s: Image view #%u created from an image with usage set as 0x%X but using "
                                             "VkImageViewUsageCreateInfo the inherited usage is the subset 0x%X "
                                             "and the image info #%u used to create the framebuffer had usage set as 0x%X",
                                             func_name, i, image_create_info->usage, image_view_state->inherited_usage, i,
                                             framebuffer_attachment_image_info->usage);
                        }
                    }

                    uint32_t view_width = image_create_info->extent.width >> subresource_range.baseMipLevel;
                    if (framebuffer_attachment_image_info->width != view_width) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03211",
                                         "%s: Image view #%u created from an image subresource with width set as %u, "
                                         "but image info #%u used to create the framebuffer had width set as %u",
                                         func_name, i, view_width, i, framebuffer_attachment_image_info->width);
                    }

                    uint32_t view_height = image_create_info->extent.height >> subresource_range.baseMipLevel;
                    if (framebuffer_attachment_image_info->height != view_height) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03212",
                                         "%s: Image view #%u created from an image subresource with height set as %u, "
                                         "but image info #%u used to create the framebuffer had height set as %u",
                                         func_name, i, view_height, i, framebuffer_attachment_image_info->height);
                    }

                    if (framebuffer_attachment_image_info->layerCount != subresource_range.layerCount) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03213",
                                         "%s: Image view #%u created with a subresource range with a layerCount of %u, "
                                         "but image info #%u used to create the framebuffer had layerCount set as %u",
                                         func_name, i, subresource_range.layerCount, i,
                                         framebuffer_attachment_image_info->layerCount);
                    }

                    const VkImageFormatListCreateInfo *image_format_list_create_info =
                        LvlFindInChain<VkImageFormatListCreateInfo>(image_create_info->pNext);
                    if (image_format_list_create_info) {
                        if (image_format_list_create_info->viewFormatCount !=
                            framebuffer_attachment_image_info->viewFormatCount) {
                            skip |= LogError(
                                pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03214",
                                "VkRenderPassBeginInfo: Image view #%u created with an image with a viewFormatCount of %u, "
                                "but image info #%u used to create the framebuffer had viewFormatCount set as %u",
                                i, image_format_list_create_info->viewFormatCount, i,
                                framebuffer_attachment_image_info->viewFormatCount);
                        }

                        for (uint32_t j = 0; j < image_format_list_create_info->viewFormatCount; ++j) {
                            bool format_found = false;
                            for (uint32_t k = 0; k < framebuffer_attachment_image_info->viewFormatCount; ++k) {
                                if (image_format_list_create_info->pViewFormats[j] ==
                                    framebuffer_attachment_image_info->pViewFormats[k]) {
                                    format_found = true;
                                }
                            }
                            if (!format_found) {
                                skip |= LogError(pRenderPassBeginInfo->renderPass,
                                                 "VUID-VkRenderPassBeginInfo-framebuffer-03215",
                                                 "VkRenderPassBeginInfo: Image view #%u created with an image including the "
                                                 "format %s in its view format list, "
                                                 "but image info #%u used to create the framebuffer does not include this format",
                                                 i, string_VkFormat(image_format_list_create_info->pViewFormats[j]), i);
                            }
                        }
                    }

                    if (render_pass_create_info->pAttachments[i].format != image_view_create_info->format) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03216",
                                         "%s: Image view #%u created with a format of %s, "
                                         "but render pass attachment description #%u created with a format of %s",
                                         func_name, i, string_VkFormat(image_view_create_info->format), i,
                                         string_VkFormat(render_pass_create_info->pAttachments[i].format));
                    }

                    if (render_pass_create_info->pAttachments[i].samples != image_create_info->samples) {
                        skip |= LogError(pRenderPassBeginInfo->renderPass, "VUID-VkRenderPassBeginInfo-framebuffer-03217",
                                         "%s: Image view #%u created with an image with %s samples, "
                                         "but render pass attachment description #%u created with %s samples",
                                         func_name, i, string_VkSampleCountFlagBits(image_create_info->samples), i,
                                         string_VkSampleCountFlagBits(render_pass_create_info->pAttachments[i].samples));
                    }

                    if (subresource_range.levelCount != 1) {
                        skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
                                         "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03218",
                                         "%s: Image view #%u created with multiple (%u) mip levels.", func_name, i,
                                         subresource_range.levelCount);
                    }

                    if (IsIdentitySwizzle(image_view_create_info->components) == false) {
                        skip |= LogError(
                            render_pass_attachment_begin_info->pAttachments[i],
                            "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-03219",
                            "%s: Image view #%u created with non-identity swizzle. All "
                            "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                            "swizzle values:\n"
                            "r swizzle = %s\n"
                            "g swizzle = %s\n"
                            "b swizzle = %s\n"
                            "a swizzle = %s\n",
                            func_name, i, string_VkComponentSwizzle(image_view_create_info->components.r),
                            string_VkComponentSwizzle(image_view_create_info->components.g),
                            string_VkComponentSwizzle(image_view_create_info->components.b),
                            string_VkComponentSwizzle(image_view_create_info->components.a));
                    }

                    if (image_view_create_info->viewType == VK_IMAGE_VIEW_TYPE_3D) {
                        skip |= LogError(render_pass_attachment_begin_info->pAttachments[i],
                                         "VUID-VkRenderPassAttachmentBeginInfo-pAttachments-04114",
                                         "%s: Image view #%u created with type VK_IMAGE_VIEW_TYPE_3D", func_name, i);
                    }
                }
            }
        }
    }

    return skip;
}

// If this is a stencil format, make sure the stencil[Load|Store]Op flag is checked, while if it is a depth/color attachment the
// [load|store]Op flag must be checked
// TODO: The memory valid flag in DEVICE_MEMORY_STATE should probably be split to track the validity of stencil memory separately.
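// Illustrative example for the helper below (editorial sketch, not part of the validation logic; the specific
// calls shown here are hypothetical, though the format/enum values are standard Vulkan). For a combined
// depth/stencil format both ops are consulted, so
//     FormatSpecificLoadAndStoreOpSettings(VK_FORMAT_D24_UNORM_S8_UINT,
//                                          VK_ATTACHMENT_LOAD_OP_DONT_CARE,  // color/depth op
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR,      // stencil op
//                                          VK_ATTACHMENT_LOAD_OP_CLEAR)      // op being queried
// returns true, because the stencil aspect is cleared. For a stencil-only format such as VK_FORMAT_S8_UINT only the
// stencil op is consulted, and for a color format such as VK_FORMAT_R8G8B8A8_UNORM only the color/depth op is consulted.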
template <typename T>
static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !FormatIsStencilOnly(format);
    bool check_stencil_load_op = FormatIsDepthAndStencil(format) || !check_color_depth_load_op;

    return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}

bool CoreChecks::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
                                            const VkRenderPassBeginInfo *pRenderPassBegin) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;

    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *const function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";

    if (render_pass_state) {
        uint32_t clear_op_size = 0;  // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR

        // Handle extension struct from EXT_sample_locations
        const VkRenderPassSampleLocationsBeginInfoEXT *sample_locations_begin_info =
            LvlFindInChain<VkRenderPassSampleLocationsBeginInfoEXT>(pRenderPassBegin->pNext);
        if (sample_locations_begin_info) {
            for (uint32_t i = 0; i < sample_locations_begin_info->attachmentInitialSampleLocationsCount; ++i) {
                const VkAttachmentSampleLocationsEXT &sample_location =
                    sample_locations_begin_info->pAttachmentInitialSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.attachmentIndex >= render_pass_state->createInfo.attachmentCount) {
                    skip |= LogError(device, "VUID-VkAttachmentSampleLocationsEXT-attachmentIndex-01531",
                                     "%s: Attachment index %u specified by attachment sample locations %u is greater than the "
                                     "attachment count of %u for the render pass being begun.",
                                     function_name, sample_location.attachmentIndex, i,
                                     render_pass_state->createInfo.attachmentCount);
                }
            }

            for (uint32_t i = 0; i < sample_locations_begin_info->postSubpassSampleLocationsCount; ++i) {
                const VkSubpassSampleLocationsEXT &sample_location =
                    sample_locations_begin_info->pPostSubpassSampleLocations[i];
                skip |= ValidateSampleLocationsInfo(&sample_location.sampleLocationsInfo, function_name);
                if (sample_location.subpassIndex >= render_pass_state->createInfo.subpassCount) {
                    skip |= LogError(device, "VUID-VkSubpassSampleLocationsEXT-subpassIndex-01532",
                                     "%s: Subpass index %u specified by subpass sample locations %u is greater than the subpass "
                                     "count of %u for the render pass being begun.",
                                     function_name, sample_location.subpassIndex, i,
                                     render_pass_state->createInfo.subpassCount);
                }
            }
        }

        for (uint32_t i = 0; i < render_pass_state->createInfo.attachmentCount; ++i) {
            auto attachment = &render_pass_state->createInfo.pAttachments[i];
            if (FormatSpecificLoadAndStoreOpSettings(attachment->format, attachment->loadOp, attachment->stencilLoadOp,
                                                     VK_ATTACHMENT_LOAD_OP_CLEAR)) {
                clear_op_size = static_cast<uint32_t>(i) + 1;

                if (FormatHasDepth(attachment->format)) {
                    skip |= ValidateClearDepthStencilValue(commandBuffer, pRenderPassBegin->pClearValues[i].depthStencil,
                                                           function_name);
                }
            }
        }

        if (clear_op_size > pRenderPassBegin->clearValueCount) {
"In %s the VkRenderPassBeginInfo struct has a clearValueCount of %u but there " "must be at least %u entries in pClearValues array to account for the highest index attachment in " "%s that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array is indexed by " "attachment number so even if some pClearValues entries between 0 and %u correspond to attachments " "that aren't cleared they will be ignored.", function_name, pRenderPassBegin->clearValueCount, clear_op_size, report_data->FormatHandle(render_pass_state->renderPass()).c_str(), clear_op_size, clear_op_size - 1); } skip |= VerifyFramebufferAndRenderPassImageViews(pRenderPassBegin, function_name); skip |= VerifyRenderAreaBounds(pRenderPassBegin); skip |= VerifyFramebufferAndRenderPassLayouts(rp_version, cb_state, pRenderPassBegin, GetFramebufferState(pRenderPassBegin->framebuffer)); if (framebuffer->rp_state->renderPass() != render_pass_state->renderPass()) { skip |= ValidateRenderPassCompatibility("render pass", render_pass_state, "framebuffer", framebuffer->rp_state.get(), function_name, "VUID-VkRenderPassBeginInfo-renderPass-00904"); } skip |= ValidateDependencies(framebuffer, render_pass_state); const CMD_TYPE cmd_type = use_rp2 ? CMD_BEGINRENDERPASS2 : CMD_BEGINRENDERPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); } auto chained_device_group_struct = LvlFindInChain<VkDeviceGroupRenderPassBeginInfo>(pRenderPassBegin->pNext); if (chained_device_group_struct) { skip |= ValidateDeviceMaskToPhysicalDeviceCount(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00905"); skip |= ValidateDeviceMaskToZero(chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00906"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state, chained_device_group_struct->deviceMask, pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceMask-00907"); if (chained_device_group_struct->deviceRenderAreaCount != 0 && chained_device_group_struct->deviceRenderAreaCount != physical_device_count) { skip |= LogError(pRenderPassBegin->renderPass, "VUID-VkDeviceGroupRenderPassBeginInfo-deviceRenderAreaCount-00908", "%s: deviceRenderAreaCount[%" PRIu32 "] is invaild. 
                             "%s: deviceRenderAreaCount[%" PRIu32 "] is invalid. Physical device count is %" PRIu32 ".",
                             function_name, chained_device_group_struct->deviceRenderAreaCount, physical_device_count);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                   VkSubpassContents contents) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer,
                                                       const VkRenderPassBeginInfo *pRenderPassBegin,
                                                       const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
    return skip;
}

bool CoreChecks::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                    const VkSubpassBeginInfo *pSubpassBeginInfo) const {
    bool skip = ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
    return skip;
}

void CoreChecks::RecordCmdBeginRenderPassLayouts(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                 const VkSubpassContents contents) {
    CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    auto render_pass_state = pRenderPassBegin ? GetRenderPassState(pRenderPassBegin->renderPass) : nullptr;
    auto framebuffer = pRenderPassBegin ? GetFramebufferState(pRenderPassBegin->framebuffer) : nullptr;
    if (render_pass_state) {
        // transition attachments to the correct layouts for beginning of renderPass and first subpass
        TransitionBeginRenderPassLayouts(cb_state, render_pass_state, framebuffer);
    }
}

void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                 VkSubpassContents contents) {
    StateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, contents);
}

void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                     const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PreCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}

void CoreChecks::PreCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
                                                  const VkSubpassBeginInfo *pSubpassBeginInfo) {
    StateTracker::PreCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo);
    RecordCmdBeginRenderPassLayouts(commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}

bool CoreChecks::ValidateCmdNextSubpass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const {
    const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer);
    assert(cb_state);
    bool skip = false;
    const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
    const char *vuid;
    const char *const function_name = use_rp2 ? "vkCmdNextSubpass2()" : "vkCmdNextSubpass()";

    const CMD_TYPE cmd_type = use_rp2 ? CMD_NEXTSUBPASS2 : CMD_NEXTSUBPASS;
    skip |= ValidateCmd(cb_state, cmd_type, function_name);

    auto subpass_count = cb_state->activeRenderPass->createInfo.subpassCount;
    if (cb_state->activeSubpass == subpass_count - 1) {
        vuid = use_rp2 ? "VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909";
"VUID-vkCmdNextSubpass2-None-03102" : "VUID-vkCmdNextSubpass-None-00909"; skip |= LogError(commandBuffer, vuid, "%s: Attempted to advance beyond final subpass.", function_name); } return skip; } bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_1, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } bool CoreChecks::PreCallValidateCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) const { return ValidateCmdNextSubpass(RENDER_PASS_VERSION_2, commandBuffer); } void CoreChecks::RecordCmdNextSubpassLayouts(VkCommandBuffer commandBuffer, VkSubpassContents contents) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); TransitionSubpassLayouts(cb_state, cb_state->activeRenderPass.get(), cb_state->activeSubpass, Get<FRAMEBUFFER_STATE>(cb_state->activeRenderPassBeginInfo.framebuffer)); } void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) { StateTracker::PostCallRecordCmdNextSubpass(commandBuffer, contents); RecordCmdNextSubpassLayouts(commandBuffer, contents); } void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2KHR(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } void CoreChecks::PostCallRecordCmdNextSubpass2(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo *pSubpassBeginInfo, const VkSubpassEndInfo *pSubpassEndInfo) { StateTracker::PostCallRecordCmdNextSubpass2(commandBuffer, pSubpassBeginInfo, pSubpassEndInfo); RecordCmdNextSubpassLayouts(commandBuffer, pSubpassBeginInfo->contents); } bool CoreChecks::ValidateCmdEndRenderPass(RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2); const char *vuid; const char *const function_name = use_rp2 ? "vkCmdEndRenderPass2KHR()" : "vkCmdEndRenderPass()"; RENDER_PASS_STATE *rp_state = cb_state->activeRenderPass.get(); if (rp_state) { if (cb_state->activeSubpass != rp_state->createInfo.subpassCount - 1) { vuid = use_rp2 ? "VUID-vkCmdEndRenderPass2-None-03103" : "VUID-vkCmdEndRenderPass-None-00910"; skip |= LogError(commandBuffer, vuid, "%s: Called before reaching final subpass.", function_name); } } const CMD_TYPE cmd_type = use_rp2 ? 
CMD_ENDRENDERPASS2 : CMD_ENDRENDERPASS; skip |= ValidateCmd(cb_state, cmd_type, function_name); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_1, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } bool CoreChecks::PreCallValidateCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) const { bool skip = ValidateCmdEndRenderPass(RENDER_PASS_VERSION_2, commandBuffer); return skip; } void CoreChecks::RecordCmdEndRenderPassLayouts(VkCommandBuffer commandBuffer) { CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); TransitionFinalSubpassLayouts(cb_state, cb_state->activeRenderPassBeginInfo.ptr(), cb_state->activeFramebuffer.get()); } void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass(commandBuffer); } void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { // Record the end at the CoreLevel to ensure StateTracker cleanup doesn't step on anything we need. RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassEndInfo); } void CoreChecks::PostCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassEndInfo) { RecordCmdEndRenderPassLayouts(commandBuffer); StateTracker::PostCallRecordCmdEndRenderPass2(commandBuffer, pSubpassEndInfo); } bool CoreChecks::ValidateFramebuffer(VkCommandBuffer primaryBuffer, const CMD_BUFFER_STATE *pCB, VkCommandBuffer secondaryBuffer, const CMD_BUFFER_STATE *pSubCB, const char *caller) const { bool skip = false; if (!pSubCB->beginInfo.pInheritanceInfo) { return skip; } VkFramebuffer primary_fb = pCB->activeFramebuffer ? 
pCB->activeFramebuffer->framebuffer() : VK_NULL_HANDLE; VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer; if (secondary_fb != VK_NULL_HANDLE) { if (primary_fb != secondary_fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); objlist.add(primary_fb); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00099", "vkCmdExecuteCommands() called w/ invalid secondary %s which has a %s" " that is not the same as the primary command buffer's current active %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str(), report_data->FormatHandle(primary_fb).c_str()); } auto fb = GetFramebufferState(secondary_fb); if (!fb) { LogObjectList objlist(primaryBuffer); objlist.add(secondaryBuffer); objlist.add(secondary_fb); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid %s.", report_data->FormatHandle(secondaryBuffer).c_str(), report_data->FormatHandle(secondary_fb).c_str()); return skip; } } return skip; } bool CoreChecks::ValidateSecondaryCommandBufferState(const CMD_BUFFER_STATE *pCB, const CMD_BUFFER_STATE *pSubCB) const { bool skip = false; layer_data::unordered_set<int> active_types; if (!disabled[query_validation]) { for (const auto &query_object : pCB->activeQueries) { auto query_pool_state = GetQueryPoolState(query_object.pool); if (query_pool_state) { if (query_pool_state->createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS && pSubCB->beginInfo.pInheritanceInfo) { VkQueryPipelineStatisticFlags cmd_buf_statistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics; if ((cmd_buf_statistics & query_pool_state->createInfo.pipelineStatistics) != cmd_buf_statistics) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(query_object.pool); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-00104", "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" ". 
Pipeline statistics is being queried so the command buffer must have all bits set on the queryPool.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), report_data->FormatHandle(query_object.pool).c_str()); } } active_types.insert(query_pool_state->createInfo.queryType); } } for (const auto &query_object : pSubCB->startedQueries) { auto query_pool_state = GetQueryPoolState(query_object.pool); if (query_pool_state && active_types.count(query_pool_state->createInfo.queryType)) { LogObjectList objlist(pCB->commandBuffer()); objlist.add(query_object.pool); skip |= LogError(objlist, kVUID_Core_DrawState_InvalidSecondaryCommandBuffer, "vkCmdExecuteCommands() called w/ invalid %s which has invalid active %s" " of type %d but a query of that type has been started on secondary %s.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), report_data->FormatHandle(query_object.pool).c_str(), query_pool_state->createInfo.queryType, report_data->FormatHandle(pSubCB->commandBuffer()).c_str()); } } } auto primary_pool = pCB->command_pool.get(); auto secondary_pool = pSubCB->command_pool.get(); if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) { LogObjectList objlist(pSubCB->commandBuffer()); objlist.add(pCB->commandBuffer()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00094", "vkCmdExecuteCommands(): Primary %s created in queue family %d has secondary " "%s created in queue family %d.", report_data->FormatHandle(pCB->commandBuffer()).c_str(), primary_pool->queueFamilyIndex, report_data->FormatHandle(pSubCB->commandBuffer()).c_str(), secondary_pool->queueFamilyIndex); } return skip; } // Object that simulates the inherited viewport/scissor state as the device executes the called secondary command buffers. // Visit the calling primary command buffer first, then the called secondaries in order. // Contact David Zhao Akeley <[email protected]> for clarifications and bug fixes. class CoreChecks::ViewportScissorInheritanceTracker { static_assert(4 == sizeof(CMD_BUFFER_STATE::viewportMask), "Adjust max_viewports to match viewportMask bit width"); static constexpr uint32_t kMaxViewports = 32, kNotTrashed = uint32_t(-2), kTrashedByPrimary = uint32_t(-1); const ValidationObject &validation_; const CMD_BUFFER_STATE *primary_state_ = nullptr; uint32_t viewport_mask_; uint32_t scissor_mask_; uint32_t viewport_trashed_by_[kMaxViewports]; // filled in VisitPrimary. uint32_t scissor_trashed_by_[kMaxViewports]; VkViewport viewports_to_inherit_[kMaxViewports]; uint32_t viewport_count_to_inherit_; // 0 if viewport count (EXT state) has never been defined (but not trashed) uint32_t scissor_count_to_inherit_; // 0 if scissor count (EXT state) has never been defined (but not trashed) uint32_t viewport_count_trashed_by_; uint32_t scissor_count_trashed_by_; public: ViewportScissorInheritanceTracker(const ValidationObject &validation) : validation_(validation) {} bool VisitPrimary(const CMD_BUFFER_STATE *primary_state) { assert(!primary_state_); primary_state_ = primary_state; viewport_mask_ = primary_state->viewportMask | primary_state->viewportWithCountMask; scissor_mask_ = primary_state->scissorMask | primary_state->scissorWithCountMask; for (uint32_t n = 0; n < kMaxViewports; ++n) { uint32_t bit = uint32_t(1) << n; viewport_trashed_by_[n] = primary_state->trashedViewportMask & bit ? kTrashedByPrimary : kNotTrashed; scissor_trashed_by_[n] = primary_state->trashedScissorMask & bit ? 
kTrashedByPrimary : kNotTrashed; if (viewport_mask_ & bit) { viewports_to_inherit_[n] = primary_state->dynamicViewports[n]; } } viewport_count_to_inherit_ = primary_state->viewportWithCountCount; scissor_count_to_inherit_ = primary_state->scissorWithCountCount; viewport_count_trashed_by_ = primary_state->trashedViewportCount ? kTrashedByPrimary : kNotTrashed; scissor_count_trashed_by_ = primary_state->trashedScissorCount ? kTrashedByPrimary : kNotTrashed; return false; } bool VisitSecondary(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { bool skip = false; if (secondary_state->inheritedViewportDepths.empty()) { skip |= VisitSecondaryNoInheritance(cmd_buffer_idx, secondary_state); } else { skip |= VisitSecondaryInheritance(cmd_buffer_idx, secondary_state); } // See note at end of VisitSecondaryNoInheritance. if (secondary_state->trashedViewportCount) { viewport_count_trashed_by_ = cmd_buffer_idx; } if (secondary_state->trashedScissorCount) { scissor_count_trashed_by_ = cmd_buffer_idx; } return skip; } private: // Track state inheritance as specified by VK_NV_inherited_scissor_viewport, including states // overwritten to undefined value by bound pipelines with non-dynamic state. bool VisitSecondaryNoInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { viewport_mask_ |= secondary_state->viewportMask | secondary_state->viewportWithCountMask; scissor_mask_ |= secondary_state->scissorMask | secondary_state->scissorWithCountMask; for (uint32_t n = 0; n < kMaxViewports; ++n) { uint32_t bit = uint32_t(1) << n; if ((secondary_state->viewportMask | secondary_state->viewportWithCountMask) & bit) { viewports_to_inherit_[n] = secondary_state->dynamicViewports[n]; viewport_trashed_by_[n] = kNotTrashed; } if ((secondary_state->scissorMask | secondary_state->scissorWithCountMask) & bit) { scissor_trashed_by_[n] = kNotTrashed; } if (secondary_state->viewportWithCountCount != 0) { viewport_count_to_inherit_ = secondary_state->viewportWithCountCount; viewport_count_trashed_by_ = kNotTrashed; } if (secondary_state->scissorWithCountCount != 0) { scissor_count_to_inherit_ = secondary_state->scissorWithCountCount; scissor_count_trashed_by_ = kNotTrashed; } // Order of above vs below matters here. if (secondary_state->trashedViewportMask & bit) { viewport_trashed_by_[n] = cmd_buffer_idx; } if (secondary_state->trashedScissorMask & bit) { scissor_trashed_by_[n] = cmd_buffer_idx; } // Check trashing dynamic viewport/scissor count in VisitSecondary (at end) as even secondary command buffers enabling // viewport/scissor state inheritance may define this state statically in bound graphics pipelines. } return false; } // Validate needed inherited state as specified by VK_NV_inherited_scissor_viewport. bool VisitSecondaryInheritance(uint32_t cmd_buffer_idx, const CMD_BUFFER_STATE *secondary_state) { bool skip = false; uint32_t check_viewport_count = 0, check_scissor_count = 0; // Common code for reporting missing inherited state (for a myriad of reasons). 
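// The reasons handled below are: the state was never defined, it was trashed by the calling primary before
// vkCmdExecuteCommands, it was trashed by an earlier secondary in this call, or (viewports only) it was defined
// but its depth range does not match the corresponding pViewportDepths entry.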
auto check_missing_inherit = [&](uint32_t was_ever_defined, uint32_t trashed_by, VkDynamicState state, uint32_t index = 0, uint32_t static_use_count = 0, const VkViewport *inherited_viewport = nullptr, const VkViewport *expected_viewport_depth = nullptr) { if (was_ever_defined && trashed_by == kNotTrashed) { if (state != VK_DYNAMIC_STATE_VIEWPORT) return false; assert(inherited_viewport != nullptr && expected_viewport_depth != nullptr); if (inherited_viewport->minDepth != expected_viewport_depth->minDepth || inherited_viewport->maxDepth != expected_viewport_depth->maxDepth) { return validation_.LogError( primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[%u] (%s) consume inherited viewport %u %s" "but this state was not inherited as its depth range [%f, %f] does not match " "pViewportDepths[%u] = [%f, %f]", unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(), unsigned(index), index >= static_use_count ? "(with count) " : "", inherited_viewport->minDepth, inherited_viewport->maxDepth, unsigned(index), expected_viewport_depth->minDepth, expected_viewport_depth->maxDepth); // akeley98 note: This VUID is not ideal; however, there isn't a more relevant VUID as // it isn't illegal in itself to have mismatched inherited viewport depths. // The error only occurs upon attempting to consume the viewport. } else { return false; } } const char *state_name; bool format_index = false; switch (state) { case VK_DYNAMIC_STATE_SCISSOR: state_name = "scissor"; format_index = true; break; case VK_DYNAMIC_STATE_VIEWPORT: state_name = "viewport"; format_index = true; break; case VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT: state_name = "dynamic viewport count"; break; case VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT: state_name = "dynamic scissor count"; break; default: assert(0); state_name = "<unknown state, report bug>"; break; } std::stringstream ss; ss << "vkCmdExecuteCommands(): Draw commands in pCommandBuffers[" << cmd_buffer_idx << "] (" << validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str() << ") consume inherited " << state_name << " "; if (format_index) { if (index >= static_use_count) { ss << "(with count) "; } ss << index << " "; } ss << "but this state "; if (!was_ever_defined) { ss << "was never defined."; } else if (trashed_by == kTrashedByPrimary) { ss << "was left undefined after vkCmdExecuteCommands or vkCmdBindPipeline (with non-dynamic state) in " "the calling primary command buffer."; } else { ss << "was left undefined after vkCmdBindPipeline (with non-dynamic state) in pCommandBuffers[" << trashed_by << "]."; } return validation_.LogError(primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "%s", ss.str().c_str()); }; // Check if secondary command buffer uses viewport/scissor-with-count state, and validate this state if so.
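// A secondary that consumed VK_DYNAMIC_STATE_*_WITH_COUNT_EXT state must be able to inherit a count that was
// both defined and not subsequently trashed; otherwise it is reported through check_missing_inherit above.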
if (secondary_state->usedDynamicViewportCount) { if (viewport_count_to_inherit_ == 0 || viewport_count_trashed_by_ != kNotTrashed) { skip |= check_missing_inherit(viewport_count_to_inherit_, viewport_count_trashed_by_, VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT_EXT); } else { check_viewport_count = viewport_count_to_inherit_; } } if (secondary_state->usedDynamicScissorCount) { if (scissor_count_to_inherit_ == 0 || scissor_count_trashed_by_ != kNotTrashed) { skip |= check_missing_inherit(scissor_count_to_inherit_, scissor_count_trashed_by_, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT_EXT); } else { check_scissor_count = scissor_count_to_inherit_; } } // Check the maximum of (viewports used by pipelines with static viewport count) and (viewports used by pipelines // with dynamic viewport count), but limit to the length of the inheritedViewportDepths array and to the uint32_t // bit width (validation layer limit). check_viewport_count = std::min(std::min(kMaxViewports, uint32_t(secondary_state->inheritedViewportDepths.size())), std::max(check_viewport_count, secondary_state->usedViewportScissorCount)); check_scissor_count = std::min(kMaxViewports, std::max(check_scissor_count, secondary_state->usedViewportScissorCount)); if (secondary_state->usedDynamicViewportCount && viewport_count_to_inherit_ > secondary_state->inheritedViewportDepths.size()) { skip |= validation_.LogError( primary_state_->commandBuffer(), "VUID-vkCmdDraw-commandBuffer-02701", "vkCmdExecuteCommands(): " "Draw commands in pCommandBuffers[%u] (%s) consume inherited dynamic viewport with count state " "but the dynamic viewport count (%u) exceeds the inheritance limit (viewportDepthCount=%u).", unsigned(cmd_buffer_idx), validation_.report_data->FormatHandle(secondary_state->commandBuffer()).c_str(), unsigned(viewport_count_to_inherit_), unsigned(secondary_state->inheritedViewportDepths.size())); } for (uint32_t n = 0; n < check_viewport_count; ++n) { skip |= check_missing_inherit(viewport_mask_ & uint32_t(1) << n, viewport_trashed_by_[n], VK_DYNAMIC_STATE_VIEWPORT, n, secondary_state->usedViewportScissorCount, &viewports_to_inherit_[n], &secondary_state->inheritedViewportDepths[n]); } for (uint32_t n = 0; n < check_scissor_count; ++n) { skip |= check_missing_inherit(scissor_mask_ & uint32_t(1) << n, scissor_trashed_by_[n], VK_DYNAMIC_STATE_SCISSOR, n, secondary_state->usedViewportScissorCount); } return skip; } }; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kMaxViewports; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kNotTrashed; constexpr uint32_t CoreChecks::ViewportScissorInheritanceTracker::kTrashedByPrimary; bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; const CMD_BUFFER_STATE *sub_cb_state = NULL; layer_data::unordered_set<const CMD_BUFFER_STATE *> linked_command_buffers; ViewportScissorInheritanceTracker viewport_scissor_inheritance{*this}; if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) { skip |= viewport_scissor_inheritance.VisitPrimary(cb_state); } for (uint32_t i = 0; i < commandBuffersCount; i++) { sub_cb_state = GetCBState(pCommandBuffers[i]); assert(sub_cb_state); if (enabled_features.inherited_viewport_scissor_features.inheritedViewportScissor2D) { skip |= viewport_scissor_inheritance.VisitSecondary(i, sub_cb_state); } if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == sub_cb_state->createInfo.level) {
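// Only secondary-level command buffers may appear in pCommandBuffers; a primary-level buffer here is an
// immediate error regardless of its recorded contents.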
skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00088", "vkCmdExecuteCommands() called w/ Primary %s in element %u of pCommandBuffers array. All " "cmd buffers in pCommandBuffers array must be secondary.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), i); } else if (VK_COMMAND_BUFFER_LEVEL_SECONDARY == sub_cb_state->createInfo.level) { if (sub_cb_state->beginInfo.pInheritanceInfo != nullptr) { const auto secondary_rp_state = GetRenderPassState(sub_cb_state->beginInfo.pInheritanceInfo->renderPass); if (cb_state->activeRenderPass && !(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->activeRenderPass->renderPass()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00096", "vkCmdExecuteCommands(): Secondary %s is executed within a %s " "instance scope, but the Secondary Command Buffer does not have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->activeRenderPass->renderPass()).c_str()); } else if (!cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-pCommandBuffers-00100", "vkCmdExecuteCommands(): Secondary %s is executed outside a render pass " "instance scope, but the Secondary Command Buffer does have the " "VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set in VkCommandBufferBeginInfo::flags when " "the vkBeginCommandBuffer() was called.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } else if (cb_state->activeRenderPass && (sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) { // Make sure render pass is compatible with parent command buffer pass if has continue if (cb_state->activeRenderPass->renderPass() != secondary_rp_state->renderPass()) { skip |= ValidateRenderPassCompatibility( "primary command buffer", cb_state->activeRenderPass.get(), "secondary command buffer", secondary_rp_state, "vkCmdExecuteCommands()", "VUID-vkCmdExecuteCommands-pInheritanceInfo-00098"); } // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB skip |= ValidateFramebuffer(commandBuffer, cb_state, pCommandBuffers[i], sub_cb_state, "vkCmdExecuteCommands()"); if (!sub_cb_state->cmd_execute_commands_functions.empty()) { // Inherit primary's activeFramebuffer and while running validate functions for (auto &function : sub_cb_state->cmd_execute_commands_functions) { skip |= function(cb_state, cb_state->activeFramebuffer.get()); } } } } } // TODO(mlentine): Move more logic into this method skip |= ValidateSecondaryCommandBufferState(cb_state, sub_cb_state); skip |= ValidateCommandBufferState(sub_cb_state, "vkCmdExecuteCommands()", 0, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089"); if (!(sub_cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) { if (sub_cb_state->InUse()) { skip |= LogError( cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00091", "vkCmdExecuteCommands(): Cannot execute pending %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } // We use an const_cast, because one cannot query a container keyed on a non-const pointer using a 
const pointer if (cb_state->linkedCommandBuffers.count(const_cast<CMD_BUFFER_STATE *>(sub_cb_state))) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError(objlist, "VUID-vkCmdExecuteCommands-pCommandBuffers-00092", "vkCmdExecuteCommands(): Cannot execute %s without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT " "set if previously executed in %s", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str(), report_data->FormatHandle(cb_state->commandBuffer()).c_str()); } const auto insert_pair = linked_command_buffers.insert(sub_cb_state); if (!insert_pair.second) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdExecuteCommands-pCommandBuffers-00093", "vkCmdExecuteCommands(): Cannot duplicate %s in pCommandBuffers without " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set.", report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } if (cb_state->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { // Warn that a non-simultaneous secondary cmd buffer makes the primary be treated as non-simultaneous LogObjectList objlist(pCommandBuffers[i]); objlist.add(cb_state->commandBuffer()); skip |= LogWarning(objlist, kVUID_Core_DrawState_InvalidCommandBufferSimultaneousUse, "vkCmdExecuteCommands(): Secondary %s does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary " "%s to be treated as if it does not have " "VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set, even though it does.", report_data->FormatHandle(pCommandBuffers[i]).c_str(), report_data->FormatHandle(cb_state->commandBuffer()).c_str()); } } if (!cb_state->activeQueries.empty() && !enabled_features.core.inheritedQueries) { skip |= LogError(pCommandBuffers[i], "VUID-vkCmdExecuteCommands-commandBuffer-00101", "vkCmdExecuteCommands(): Secondary %s cannot be submitted with a query in flight and " "inherited queries not supported on this device.", report_data->FormatHandle(pCommandBuffers[i]).c_str()); } // Validate initial layout uses vs. the primary cmd buffer state // Novel Valid usage: "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001" // initial layout usage of secondary command buffers resources must match parent command buffer const auto *const_cb_state = static_cast<const CMD_BUFFER_STATE *>(cb_state); for (const auto &sub_layout_map_entry : sub_cb_state->image_layout_map) { const auto image = sub_layout_map_entry.first; const auto *image_state = GetImageState(image); if (!image_state) continue; // Can't set layouts of a dead image const auto *cb_subres_map = const_cb_state->GetImageSubresourceLayoutMap(image); // Const getter can be null in which case we have nothing to check against for this image... if (!cb_subres_map) continue; const auto *sub_cb_subres_map = &sub_layout_map_entry.second; // Validate the initial_uses, that they match the current state of the primary cb, or absent a current state, // that they match any initial_layout. for (const auto &subres_layout : *sub_cb_subres_map) { const auto &sub_layout = subres_layout.initial_layout; const auto &subresource = subres_layout.subresource; if (VK_IMAGE_LAYOUT_UNDEFINED == sub_layout) continue; // secondary doesn't care about current or initial // Look up the layout to compare to the initial layout of the sub command buffer (current else initial) const auto *cb_layouts = cb_subres_map->GetSubresourceLayouts(subresource); auto cb_layout = cb_layouts ?
cb_layouts->current_layout : kInvalidLayout; const char *layout_type = "current"; if (cb_layout == kInvalidLayout) { cb_layout = cb_layouts ? cb_layouts->initial_layout : kInvalidLayout; layout_type = "initial"; } if ((cb_layout != kInvalidLayout) && (cb_layout != sub_layout)) { skip |= LogError(pCommandBuffers[i], "UNASSIGNED-vkCmdExecuteCommands-commandBuffer-00001", "%s: Executed secondary command buffer using %s (subresource: aspectMask 0x%X array layer %u, " "mip level %u) which expects layout %s--instead, image %s layout is %s.", "vkCmdExecuteCommands():", report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer, subresource.mipLevel, string_VkImageLayout(sub_layout), layout_type, string_VkImageLayout(cb_layout)); } } } // All command buffers involved must be protected or unprotected if ((cb_state->unprotected == false) && (sub_cb_state->unprotected == true)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01820", "vkCmdExecuteCommands(): command buffer %s is protected while secondary command buffer %s is unprotected", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } else if ((cb_state->unprotected == true) && (sub_cb_state->unprotected == false)) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(sub_cb_state->commandBuffer()); skip |= LogError( objlist, "VUID-vkCmdExecuteCommands-commandBuffer-01821", "vkCmdExecuteCommands(): command buffer %s is unprotected while secondary command buffer %s is protected", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(sub_cb_state->commandBuffer()).c_str()); } } skip |= ValidateCmd(cb_state, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()"); return skip; } bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) const { bool skip = false; const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { skip = LogError(mem, "VUID-vkMapMemory-memory-00682", "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } if (mem_info->multi_instance) { skip = LogError(mem, "VUID-vkMapMemory-memory-00683", "Memory (%s) must not have been allocated with multiple instances -- either by supplying a deviceMask " "with more than one bit set, or by allocation from a heap with the MULTI_INSTANCE heap flag set.", report_data->FormatHandle(mem).c_str()); } skip |= ValidateMapMemRange(mem_info, offset, size); } return skip; } bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) const { bool skip = false; const auto mem_info = GetDevMemState(mem); if (mem_info && !mem_info->mapped_range.size) { // Valid Usage: memory must currently be mapped skip |= LogError(mem, "VUID-vkUnmapMemory-memory-00689", "Unmapping Memory without memory being mapped: %s.", report_data->FormatHandle(mem).c_str()); } return skip; } bool CoreChecks::ValidateMemoryIsMapped(const char *funcName, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; for (uint32_t i = 0; i < memRangeCount; ++i) { auto mem_info = GetDevMemState(pMemRanges[i].memory); if (mem_info) { //
Makes sure the memory is already mapped if (mem_info->mapped_range.size == 0) { skip = LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-memory-00684", "%s: Attempting to use memory (%s) that is not currently host mapped.", funcName, report_data->FormatHandle(pMemRanges[i].memory).c_str()); } if (pMemRanges[i].size == VK_WHOLE_SIZE) { if (mem_info->mapped_range.offset > pMemRanges[i].offset) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00686", "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(mem_info->mapped_range.offset)); } } else { const uint64_t data_end = (mem_info->mapped_range.size == VK_WHOLE_SIZE) ? mem_info->alloc_info.allocationSize : (mem_info->mapped_range.offset + mem_info->mapped_range.size); if ((mem_info->mapped_range.offset > pMemRanges[i].offset) || (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) { skip |= LogError(pMemRanges[i].memory, "VUID-VkMappedMemoryRange-size-00685", "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER ") exceeds the Memory Object's upper-bound (" PRINTF_SIZE_T_SPECIFIER ").", funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size), static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end)); } } } } return skip; } bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(const char *func_name, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) const { bool skip = false; for (uint32_t i = 0; i < mem_range_count; ++i) { const uint64_t atom_size = phys_dev_props.limits.nonCoherentAtomSize; const VkDeviceSize offset = mem_ranges[i].offset; const VkDeviceSize size = mem_ranges[i].size; if (SafeModulo(offset, atom_size) != 0) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-offset-00687", "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ").", func_name, i, offset, atom_size); } auto mem_info = GetDevMemState(mem_ranges[i].memory); if (mem_info) { const auto allocation_size = mem_info->alloc_info.allocationSize; if (size == VK_WHOLE_SIZE) { const auto mapping_offset = mem_info->mapped_range.offset; const auto mapping_size = mem_info->mapped_range.size; const auto mapping_end = ((mapping_size == VK_WHOLE_SIZE) ?
allocation_size : mapping_offset + mapping_size); if (SafeModulo(mapping_end, atom_size) != 0 && mapping_end != allocation_size) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01389", "%s: Size in pMemRanges[%d] is VK_WHOLE_SIZE and the mapping end (0x%" PRIxLEAST64 " = 0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64 ") is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ") and not equal to the end of the memory object (0x%" PRIxLEAST64 ").", func_name, i, mapping_end, mapping_offset, mapping_size, atom_size, allocation_size); } } else { const auto range_end = size + offset; if (range_end != allocation_size && SafeModulo(size, atom_size) != 0) { skip |= LogError(mem_ranges->memory, "VUID-VkMappedMemoryRange-size-01390", "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 ") and offset + size (0x%" PRIxLEAST64 " + 0x%" PRIxLEAST64 " = 0x%" PRIxLEAST64 ") is not equal to the memory size (0x%" PRIxLEAST64 ").", func_name, i, size, atom_size, offset, size, range_end, allocation_size); } } } } return skip; } bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkFlushMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) const { bool skip = false; skip |= ValidateMappedMemoryRangeDeviceLimits("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); skip |= ValidateMemoryIsMapped("vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges); return skip; } bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) const { bool skip = false; const auto mem_info = GetDevMemState(mem); if (mem_info) { if ((phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip = LogError(mem, "VUID-vkGetDeviceMemoryCommitment-memory-00690", "vkGetDeviceMemoryCommitment(): Querying commitment for memory without " "VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT set: %s.", report_data->FormatHandle(mem).c_str()); } } return skip; } bool CoreChecks::ValidateBindImageMemory(uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos, const char *api_name) const { bool skip = false; bool bind_image_mem_2 = strcmp(api_name, "vkBindImageMemory()") != 0; char error_prefix[128]; strcpy(error_prefix, api_name); // Track all image sub resources if they are bound for bind_image_mem_2 // uint32_t[3] is which index in pBindInfos for max 3 planes // Non disjoint images act as a single plane layer_data::unordered_map<VkImage, std::array<uint32_t, 3>> resources_bound; for (uint32_t i = 0; i < bindInfoCount; i++) { if (bind_image_mem_2 == true) { sprintf(error_prefix, "%s pBindInfos[%u]", api_name, i); } const VkBindImageMemoryInfo &bind_info = pBindInfos[i]; const IMAGE_STATE *image_state = GetImageState(bind_info.image); if (image_state) { // Track objects tied to memory skip |= ValidateSetMemBinding(bind_info.memory, image_state->Handle(), error_prefix); const auto plane_info = LvlFindInChain<VkBindImagePlaneMemoryInfo>(bind_info.pNext); const auto mem_info =
GetDevMemState(bind_info.memory); // Need extra check for disjoint flag in case called without bindImage2 and don't want false positive errors // no 'else' case as if that happens another VUID is already being triggered for it being invalid if ((plane_info == nullptr) && (image_state->disjoint == false)) { // Check non-disjoint images VkMemoryRequirements // All validation using the image_state->requirements for external AHB is checked in the Android-only section if (image_state->IsExternalAHB() == false) { const VkMemoryRequirements &mem_req = image_state->requirements[0]; // Validate memory requirements alignment if (SafeModulo(bind_info.memoryOffset, mem_req.alignment) != 0) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memoryOffset-01048"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01616"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memoryOffset-01613"; } skip |= LogError(bind_info.image, validation_error, "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", error_prefix, bind_info.memoryOffset, mem_req.alignment); } if (mem_info) { safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info; // Validate memory requirements size if (mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-size-01049"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01617"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01614"; } skip |= LogError(bind_info.image, validation_error, "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements with image.", error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, mem_req.size); } // Validate memory type used { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-01047"; } else if (device_extensions.vk_khr_sampler_ycbcr_conversion) { validation_error = "VUID-VkBindImageMemoryInfo-pNext-01615"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01612"; } skip |= ValidateMemoryTypes(mem_info, mem_req.memoryTypeBits, error_prefix, validation_error); } } } if (bind_image_mem_2 == true) { // since it's a non-disjoint image, finding VkImage in map is a duplicate auto it = resources_bound.find(image_state->image()); if (it == resources_bound.end()) { std::array<uint32_t, 3> bound_index = {i, UINT32_MAX, UINT32_MAX}; resources_bound.emplace(image_state->image(), bound_index); } else { skip |= LogError( bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006", "%s: The same non-disjoint image resource is being bound twice at pBindInfos[%d] and pBindInfos[%d]", error_prefix, it->second[0], i); } } } else if ((plane_info != nullptr) && (image_state->disjoint == true)) { // Check disjoint images VkMemoryRequirements for given plane int plane = 0; // All validation using the image_state->plane*_requirements for external AHB is checked in the Android-only section if (image_state->IsExternalAHB() == false) { const VkImageAspectFlagBits aspect = plane_info->planeAspect; switch
(aspect) { case VK_IMAGE_ASPECT_PLANE_0_BIT: plane = 0; break; case VK_IMAGE_ASPECT_PLANE_1_BIT: plane = 1; break; case VK_IMAGE_ASPECT_PLANE_2_BIT: plane = 2; break; default: assert(false); // parameter validation should have caught this break; } const VkMemoryRequirements &disjoint_mem_req = image_state->requirements[plane]; // Validate memory requirements alignment if (SafeModulo(bind_info.memoryOffset, disjoint_mem_req.alignment) != 0) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01620", "%s: memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the VkMemoryRequirements::alignment value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.", error_prefix, bind_info.memoryOffset, disjoint_mem_req.alignment, string_VkImageAspectFlagBits(aspect)); } if (mem_info) { safe_VkMemoryAllocateInfo alloc_info = mem_info->alloc_info; // Validate memory requirements size if (disjoint_mem_req.size > alloc_info.allocationSize - bind_info.memoryOffset) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01621", "%s: memory size minus memoryOffset is 0x%" PRIxLEAST64 " but must be at least as large as VkMemoryRequirements::size value 0x%" PRIxLEAST64 ", returned from a call to vkGetImageMemoryRequirements2 with disjoint image for aspect plane %s.", error_prefix, alloc_info.allocationSize - bind_info.memoryOffset, disjoint_mem_req.size, string_VkImageAspectFlagBits(aspect)); } // Validate memory type used { skip |= ValidateMemoryTypes(mem_info, disjoint_mem_req.memoryTypeBits, error_prefix, "VUID-VkBindImageMemoryInfo-pNext-01619"); } } } auto it = resources_bound.find(image_state->image()); if (it == resources_bound.end()) { std::array<uint32_t, 3> bound_index = {UINT32_MAX, UINT32_MAX, UINT32_MAX}; bound_index[plane] = i; resources_bound.emplace(image_state->image(), bound_index); } else { if (it->second[plane] == UINT32_MAX) { it->second[plane] = i; } else { skip |= LogError(bind_info.image, "VUID-vkBindImageMemory2-pBindInfos-04006", "%s: The same disjoint image sub-resource for plane %d is being bound twice at " "pBindInfos[%d] and pBindInfos[%d]", error_prefix, plane, it->second[plane], i); } } } if (mem_info) { // Validate bound memory range information // if memory is exported to an AHB then the mem_info->allocationSize must be zero and this check is not needed if ((mem_info->IsExport() == false) || ((mem_info->export_handle_type_flags & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID) == 0)) { skip |= ValidateInsertImageMemoryRange(bind_info.image, mem_info, bind_info.memoryOffset, error_prefix); } // Validate dedicated allocation if (mem_info->IsDedicatedImage()) { if (enabled_features.dedicated_allocation_image_aliasing_features.dedicatedAllocationImageAliasing) { const auto current_image_state = GetImageState(bind_info.image); if ((bind_info.memoryOffset != 0) || !current_image_state || !current_image_state->IsCreateInfoDedicatedAllocationImageAliasingCompatible( mem_info->dedicated->create_info.image)) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-02629"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-02629"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); objlist.add(mem_info->dedicated->handle); skip |= LogError( objlist, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be compatible " "with %s
and memoryOffset 0x%" PRIxLEAST64 " must be zero.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str(), report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset); } } else { if ((bind_info.memoryOffset != 0) || (mem_info->dedicated->handle.Cast<VkImage>() != bind_info.image)) { const char *validation_error; if (bind_image_mem_2 == false) { validation_error = "VUID-vkBindImageMemory-memory-01509"; } else { validation_error = "VUID-VkBindImageMemoryInfo-memory-01509"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); objlist.add(mem_info->dedicated->handle); skip |= LogError(objlist, validation_error, "%s: for dedicated memory allocation %s, VkMemoryDedicatedAllocateInfo:: %s must be equal " "to %s and memoryOffset 0x%" PRIxLEAST64 " must be zero.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(mem_info->dedicated->handle).c_str(), report_data->FormatHandle(bind_info.image).c_str(), bind_info.memoryOffset); } } } // Validate export memory handles if ((mem_info->export_handle_type_flags != 0) && ((mem_info->export_handle_type_flags & image_state->external_memory_handle) == 0)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-memory-02728" : "VUID-vkBindImageMemory-memory-02728"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) has an external handleType of %s which does not include at least " "one handle from VkImage (%s) handleType %s.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->export_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } // Validate import memory handles if (mem_info->IsImportAHB() == true) { skip |= ValidateImageImportedHandleANDROID(api_name, image_state->external_memory_handle, bind_info.memory, bind_info.image); } else if (mem_info->IsImport() == true) { if ((mem_info->import_handle_type_flags & image_state->external_memory_handle) == 0) { const char *vuid = nullptr; if ((bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02989"; } else if ((!bind_image_mem_2) && (device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02989"; } else if ((bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-VkBindImageMemoryInfo-memory-02729"; } else if ((!bind_image_mem_2) && (!device_extensions.vk_android_external_memory_android_hardware_buffer)) { vuid = "VUID-vkBindImageMemory-memory-02729"; } LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with an import operation with handleType of %s " "which is not set in the VkImage (%s) VkExternalMemoryImageCreateInfo::handleType (%s)", api_name, report_data->FormatHandle(bind_info.memory).c_str(), string_VkExternalMemoryHandleTypeFlags(mem_info->import_handle_type_flags).c_str(), report_data->FormatHandle(bind_info.image).c_str(), string_VkExternalMemoryHandleTypeFlags(image_state->external_memory_handle).c_str()); } } // Validate mix of protected buffer and memory if ((image_state->unprotected == false) && 
(mem_info->unprotected == true)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01901" : "VUID-vkBindImageMemory-None-01901"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was not created with protected memory but the VkImage (%s) was " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } else if ((image_state->unprotected == true) && (mem_info->unprotected == false)) { const char *vuid = bind_image_mem_2 ? "VUID-VkBindImageMemoryInfo-None-01902" : "VUID-vkBindImageMemory-None-01902"; LogObjectList objlist(bind_info.image); objlist.add(bind_info.memory); skip |= LogError(objlist, vuid, "%s: The VkDeviceMemory (%s) was created with protected memory but the VkImage (%s) was not " "set to use protected memory.", api_name, report_data->FormatHandle(bind_info.memory).c_str(), report_data->FormatHandle(bind_info.image).c_str()); } } const auto swapchain_info = LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(bind_info.pNext); if (swapchain_info) { if (bind_info.memory != VK_NULL_HANDLE) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01631", "%s: %s is not VK_NULL_HANDLE.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } if (image_state->create_from_swapchain != swapchain_info->swapchain) { LogObjectList objlist(image_state->image()); objlist.add(image_state->create_from_swapchain); objlist.add(swapchain_info->swapchain); skip |= LogError( objlist, kVUID_Core_BindImageMemory_Swapchain, "%s: %s is created by %s, but the image is bound by %s. The image should be created and bound by the same " "swapchain", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), report_data->FormatHandle(image_state->create_from_swapchain).c_str(), report_data->FormatHandle(swapchain_info->swapchain).c_str()); } const auto swapchain_state = GetSwapchainState(swapchain_info->swapchain); if (swapchain_state && swapchain_state->images.size() <= swapchain_info->imageIndex) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemorySwapchainInfoKHR-imageIndex-01644", "%s: imageIndex (%i) is out of bounds of %s images (size: %i)", error_prefix, swapchain_info->imageIndex, report_data->FormatHandle(swapchain_info->swapchain).c_str(), static_cast<int>(swapchain_state->images.size())); } } else { if (image_state->create_from_swapchain) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-image-01630", "%s: pNext of VkBindImageMemoryInfo doesn't include VkBindImageMemorySwapchainInfoKHR.", error_prefix); } if (!mem_info) { skip |= LogError(bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01632", "%s: %s is invalid.", error_prefix, report_data->FormatHandle(bind_info.memory).c_str()); } } if (plane_info) { // Checks for disjoint bit in image if (image_state->disjoint == false) { skip |= LogError( bind_info.image, "VUID-VkBindImageMemoryInfo-pNext-01618", "%s: pNext of VkBindImageMemoryInfo contains VkBindImagePlaneMemoryInfo and %s is not created with " "VK_IMAGE_CREATE_DISJOINT_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str()); } // Make sure planeAspect is only a single, valid plane uint32_t planes = FormatPlaneCount(image_state->createInfo.format); VkImageAspectFlags aspect = plane_info->planeAspect; if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) { skip |= 
LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) { skip |= LogError( bind_info.image, "VUID-VkBindImagePlaneMemoryInfo-planeAspect-02283", "%s: Image %s VkBindImagePlaneMemoryInfo::planeAspect is %s but can only be VK_IMAGE_ASPECT_PLANE_0_BIT " "or VK_IMAGE_ASPECT_PLANE_1_BIT or VK_IMAGE_ASPECT_PLANE_2_BIT.", error_prefix, report_data->FormatHandle(image_state->image()).c_str(), string_VkImageAspectFlags(aspect).c_str()); } } } } // Check to make sure all disjoint planes were bound for (auto &resource : resources_bound) { const IMAGE_STATE *image_state = GetImageState(resource.first); if (image_state->disjoint == true) { uint32_t total_planes = FormatPlaneCount(image_state->createInfo.format); for (uint32_t i = 0; i < total_planes; i++) { if (resource.second[i] == UINT32_MAX) { skip |= LogError(resource.first, "VUID-vkBindImageMemory2-pBindInfos-02858", "%s: Plane %u of the disjoint image was not bound. All %d planes need to be bound individually " "in separate pBindInfos in a single call.", api_name, i, total_planes); } } } } return skip; } bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) const { bool skip = false; const IMAGE_STATE *image_state = GetImageState(image); if (image_state) { // Checks for no disjoint bit if (image_state->disjoint == true) { skip |= LogError(image, "VUID-vkBindImageMemory-image-01608", "%s must not have been created with the VK_IMAGE_CREATE_DISJOINT_BIT (need to use vkBindImageMemory2).", report_data->FormatHandle(image).c_str()); } } auto bind_info = LvlInitStruct<VkBindImageMemoryInfo>(); bind_info.image = image; bind_info.memory = mem; bind_info.memoryOffset = memoryOffset; skip |= ValidateBindImageMemory(1, &bind_info, "vkBindImageMemory()"); return skip; } bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos) const { return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2()"); } bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo *pBindInfos) const { return ValidateBindImageMemory(bindInfoCount, pBindInfos, "vkBindImageMemory2KHR()"); } bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->write_in_use) { skip |= LogError(event, kVUID_Core_DrawState_QueueForwardProgress, "vkSetEvent(): %s that is already in use by a command buffer.", report_data->FormatHandle(event).c_str()); } if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |= LogError(event, "VUID-vkSetEvent-event-03941", "vkSetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateResetEvent(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |=
LogError(event, "VUID-vkResetEvent-event-03823", "vkResetEvent(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateGetEventStatus(VkDevice device, VkEvent event) const { bool skip = false; const auto event_state = GetEventState(event); if (event_state) { if (event_state->flags & VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR) { skip |= LogError(event, "VUID-vkGetEventStatus-event-03940", "vkGetEventStatus(): %s was created with VK_EVENT_CREATE_DEVICE_ONLY_BIT_KHR.", report_data->FormatHandle(event).c_str()); } } return skip; } bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) const { const auto queue_data = GetQueueState(queue); const auto fence_state = GetFenceState(fence); bool skip = ValidateFenceForSubmit(fence_state, "VUID-vkQueueBindSparse-fence-01114", "VUID-vkQueueBindSparse-fence-01113", "VkQueueBindSparse()"); if (skip) { return true; } const auto queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_data->queueFamilyIndex].queueFlags; if (!(queue_flags & VK_QUEUE_SPARSE_BINDING_BIT)) { skip |= LogError(queue, "VUID-vkQueueBindSparse-queuetype", "vkQueueBindSparse(): a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set."); } layer_data::unordered_set<VkSemaphore> signaled_semaphores; layer_data::unordered_set<VkSemaphore> unsignaled_semaphores; layer_data::unordered_set<VkSemaphore> internal_semaphores; auto *vuid_error = device_extensions.vk_khr_timeline_semaphore ? "VUID-vkQueueBindSparse-pWaitSemaphores-03245" : kVUID_Core_DrawState_QueueForwardProgress; for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) { const VkBindSparseInfo &bind_info = pBindInfo[bind_idx]; auto timeline_semaphore_submit_info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(pBindInfo->pNext); std::vector<SEMAPHORE_WAIT> semaphore_waits; std::vector<VkSemaphore> semaphore_signals; for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) { VkSemaphore semaphore = bind_info.pWaitSemaphores[i]; const auto semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246", "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but " "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info && bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) { skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247", "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains " "an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different " "than pBindInfo[%u].waitSemaphoreCount (%u)", bind_idx, i, report_data->FormatHandle(semaphore).c_str(), timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx, bind_info.waitSemaphoreCount); } if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY && (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) { if (unsignaled_semaphores.count(semaphore) || (!(signaled_semaphores.count(semaphore)) && 
        for (uint32_t i = 0; i < bind_info.waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pWaitSemaphores[i];
            const auto semaphore_state = GetSemaphoreState(semaphore);
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                 "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, but "
                                 "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                bind_info.waitSemaphoreCount != timeline_semaphore_submit_info->waitSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03247",
                                 "VkQueueBindSparse: pBindInfo[%u].pWaitSemaphores[%u] (%s) is a timeline semaphore, it contains "
                                 "an instance of VkTimelineSemaphoreSubmitInfo, but waitSemaphoreValueCount (%u) is different "
                                 "than pBindInfo[%u].waitSemaphoreCount (%u)",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->waitSemaphoreValueCount, bind_idx,
                                 bind_info.waitSemaphoreCount);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                (semaphore_state->scope == kSyncScopeInternal || internal_semaphores.count(semaphore))) {
                if (unsignaled_semaphores.count(semaphore) ||
                    (!(signaled_semaphores.count(semaphore)) && !(semaphore_state->signaled) &&
                     !SemaphoreWasSignaled(semaphore))) {
                    LogObjectList objlist(semaphore);
                    objlist.add(queue);
                    skip |= LogError(
                        objlist,
                        semaphore_state->scope == kSyncScopeInternal ? vuid_error : kVUID_Core_DrawState_QueueForwardProgress,
                        "vkQueueBindSparse(): Queue %s is waiting on pBindInfo[%u].pWaitSemaphores[%u] (%s) that has no way to be "
                        "signaled.",
                        report_data->FormatHandle(queue).c_str(), bind_idx, i, report_data->FormatHandle(semaphore).c_str());
                } else {
                    signaled_semaphores.erase(semaphore);
                    unsignaled_semaphores.insert(semaphore);
                }
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                semaphore_state->scope == kSyncScopeExternalTemporary) {
                internal_semaphores.insert(semaphore);
            }
        }

        for (uint32_t i = 0; i < bind_info.signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = bind_info.pSignalSemaphores[i];
            const auto semaphore_state = GetSemaphoreState(semaphore);
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && !timeline_semaphore_submit_info) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pWaitSemaphores-03246",
                                 "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, but "
                                 "pBindInfo[%u] does not include an instance of VkTimelineSemaphoreSubmitInfo",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(), bind_idx);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                timeline_semaphore_submit_info->pSignalSemaphoreValues[i] <= semaphore_state->payload) {
                LogObjectList objlist(semaphore);
                objlist.add(queue);
                // Print the submitted signal value first and the current payload second, matching the message text.
                skip |= LogError(objlist, "VUID-VkBindSparseInfo-pSignalSemaphores-03249",
                                 "VkQueueBindSparse: signal value (0x%" PRIx64
                                 ") in %s must be greater than current timeline semaphore %s value (0x%" PRIx64
                                 ") in pBindInfo[%u].pSignalSemaphores[%u]",
                                 timeline_semaphore_submit_info->pSignalSemaphoreValues[i],
                                 report_data->FormatHandle(queue).c_str(), report_data->FormatHandle(semaphore).c_str(),
                                 semaphore_state->payload, bind_idx, i);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_TIMELINE && timeline_semaphore_submit_info &&
                bind_info.signalSemaphoreCount != timeline_semaphore_submit_info->signalSemaphoreValueCount) {
                skip |= LogError(semaphore, "VUID-VkBindSparseInfo-pNext-03248",
                                 "VkQueueBindSparse: pBindInfo[%u].pSignalSemaphores[%u] (%s) is a timeline semaphore, it contains "
                                 "an instance of VkTimelineSemaphoreSubmitInfo, but signalSemaphoreValueCount (%u) is different "
                                 "than pBindInfo[%u].signalSemaphoreCount (%u)",
                                 bind_idx, i, report_data->FormatHandle(semaphore).c_str(),
                                 timeline_semaphore_submit_info->signalSemaphoreValueCount, bind_idx,
                                 bind_info.signalSemaphoreCount);
            }
            if (semaphore_state && semaphore_state->type == VK_SEMAPHORE_TYPE_BINARY &&
                semaphore_state->scope == kSyncScopeInternal) {
                if (signaled_semaphores.count(semaphore) ||
                    (!(unsignaled_semaphores.count(semaphore)) && semaphore_state->signaled)) {
                    LogObjectList objlist(semaphore);
                    objlist.add(queue);
                    objlist.add(semaphore_state->signaler.first);
                    skip |= LogError(objlist, kVUID_Core_DrawState_QueueForwardProgress,
                                     "vkQueueBindSparse(): %s is signaling pBindInfo[%u].pSignalSemaphores[%u] (%s) that was "
                                     "previously signaled by %s but has not since been waited on by any queue.",
                                     report_data->FormatHandle(queue).c_str(), bind_idx, i,
                                     report_data->FormatHandle(semaphore).c_str(),
                                     report_data->FormatHandle(semaphore_state->signaler.first).c_str());
                } else {
                    unsignaled_semaphores.erase(semaphore);
                    signaled_semaphores.insert(semaphore);
                }
            }
        }

        for (uint32_t image_idx = 0; image_idx < bind_info.imageBindCount; ++image_idx) {
            const VkSparseImageMemoryBindInfo &image_bind = bind_info.pImageBinds[image_idx];
            const auto image_state = GetImageState(image_bind.image);
            if (image_state && !(image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT)) {
                skip |= LogError(image_bind.image, "VUID-VkSparseImageMemoryBindInfo-image-02901",
                                 "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: image must have been created with "
                                 "VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT set",
                                 bind_idx, image_idx);
            }
            for (uint32_t image_bind_idx = 0; image_bind_idx < image_bind.bindCount; ++image_bind_idx) {
                const VkSparseImageMemoryBind &memory_bind = image_bind.pBinds[image_bind_idx];
                const auto *mem_info = Get<DEVICE_MEMORY_STATE>(memory_bind.memory);
                if (mem_info) {
                    if (memory_bind.memoryOffset >= mem_info->alloc_info.allocationSize) {
                        skip |= LogError(
                            image_bind.image, "VUID-VkSparseMemoryBind-memoryOffset-01101",
                            "vkQueueBindSparse(): pBindInfo[%u].pImageBinds[%u]: memoryOffset is not less than the size of memory",
                            bind_idx, image_idx);
                    }
                }
            }
        }
    }

    if (skip) return skip;

    // Now verify maxTimelineSemaphoreValueDifference
    for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; ++bind_idx) {
        Location outer_loc(Func::vkQueueBindSparse, Struct::VkBindSparseInfo);
        const VkBindSparseInfo *bind_info = &pBindInfo[bind_idx];
        auto *info = LvlFindInChain<VkTimelineSemaphoreSubmitInfo>(bind_info->pNext);
        if (info) {
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->waitSemaphoreValueCount) {
                for (uint32_t i = 0; i < bind_info->waitSemaphoreCount; ++i) {
                    auto loc = outer_loc.dot(Field::pWaitSemaphoreValues, i);
                    VkSemaphore semaphore = bind_info->pWaitSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pWaitSemaphoreValues[i]);
                }
            }
            // If there are any timeline semaphores, this condition gets checked before the early return above
            if (info->signalSemaphoreValueCount) {
                for (uint32_t i = 0; i < bind_info->signalSemaphoreCount; ++i) {
                    auto loc = outer_loc.dot(Field::pSignalSemaphoreValues, i);
                    VkSemaphore semaphore = bind_info->pSignalSemaphores[i];
                    skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, semaphore, info->pSignalSemaphoreValues[i]);
                }
            }
        }
    }
    return skip;
}

bool CoreChecks::ValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo, const char *api_name) const {
    bool skip = false;
    const auto semaphore_state = GetSemaphoreState(pSignalInfo->semaphore);
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) {
        skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-semaphore-03257",
                         "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", api_name,
                         report_data->FormatHandle(pSignalInfo->semaphore).c_str());
        return skip;
    }
    if (semaphore_state && semaphore_state->payload >= pSignalInfo->value) {
        skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03258",
                         "%s(): value must be greater than current semaphore %s value", api_name,
                         report_data->FormatHandle(pSignalInfo->semaphore).c_str());
    }
    for (auto &pair : queueMap) {
        const QUEUE_STATE &queue_state = pair.second;
        for (const auto &submission : queue_state.submissions) {
            for (const auto &signal_semaphore : submission.signalSemaphores) {
                if (signal_semaphore.semaphore == pSignalInfo->semaphore && pSignalInfo->value >= signal_semaphore.payload) {
                    skip |= LogError(pSignalInfo->semaphore, "VUID-VkSemaphoreSignalInfo-value-03259",
                                     "%s(): value must be greater than value of pending signal operation "
                                     "for semaphore %s",
                                     api_name, report_data->FormatHandle(pSignalInfo->semaphore).c_str());
                }
            }
        }
    }

    if (!skip) {
        Location loc(Func::vkSignalSemaphore, Struct::VkSemaphoreSignalInfo, Field::value);
        skip |= ValidateMaxTimelineSemaphoreValueDifference(loc, pSignalInfo->semaphore, pSignalInfo->value);
    }
    return skip;
}

bool CoreChecks::PreCallValidateSignalSemaphore(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
    return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphore");
}

bool CoreChecks::PreCallValidateSignalSemaphoreKHR(VkDevice device, const VkSemaphoreSignalInfo *pSignalInfo) const {
    return ValidateSignalSemaphore(device, pSignalInfo, "vkSignalSemaphoreKHR");
}

bool CoreChecks::ValidateImportSemaphore(VkSemaphore semaphore, const char *caller_name) const {
    bool skip = false;
    const SEMAPHORE_STATE *sema_node = GetSemaphoreState(semaphore);
    if (sema_node) {
        skip |= ValidateObjectNotInUse(sema_node, caller_name, kVUIDUndefined);
    }
    return skip;
}

#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
    VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) const {
    return ValidateImportSemaphore(pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
#endif  // VK_USE_PLATFORM_WIN32_KHR

bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device,
                                                     const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) const {
    return ValidateImportSemaphore(pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}

bool CoreChecks::ValidateImportFence(VkFence fence, const char *vuid, const char *caller_name) const {
    const FENCE_STATE *fence_node = GetFenceState(fence);
    bool skip = false;
    if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
        skip |= LogError(fence, vuid, "%s: Fence %s is currently in use.", caller_name,
                         report_data->FormatHandle(fence).c_str());
    }
    return skip;
}

#ifdef VK_USE_PLATFORM_WIN32_KHR
bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(
    VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) const {
    return ValidateImportFence(pImportFenceWin32HandleInfo->fence, "VUID-vkImportFenceWin32HandleKHR-fence-04448",
                               "vkImportFenceWin32HandleKHR()");
}
#endif  // VK_USE_PLATFORM_WIN32_KHR

bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) const {
    return ValidateImportFence(pImportFenceFdInfo->fence, "VUID-vkImportFenceFdKHR-fence-01463", "vkImportFenceFdKHR()");
}

static VkImageCreateInfo GetSwapchainImpliedImageCreateInfo(VkSwapchainCreateInfoKHR const *pCreateInfo) {
    auto result = LvlInitStruct<VkImageCreateInfo>();

    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) {
        result.flags |= VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT;
    }
    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) {
        result.flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
    }
    if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
        result.flags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT;
    }

    result.imageType = VK_IMAGE_TYPE_2D;
    result.format = pCreateInfo->imageFormat;
    result.extent.width = pCreateInfo->imageExtent.width;
    result.extent.height = pCreateInfo->imageExtent.height;
    result.extent.depth = 1;
    result.mipLevels = 1;
    result.arrayLayers =
pCreateInfo->imageArrayLayers; result.samples = VK_SAMPLE_COUNT_1_BIT; result.tiling = VK_IMAGE_TILING_OPTIMAL; result.usage = pCreateInfo->imageUsage; result.sharingMode = pCreateInfo->imageSharingMode; result.queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount; result.pQueueFamilyIndices = pCreateInfo->pQueueFamilyIndices; result.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; return result; } bool CoreChecks::ValidateCreateSwapchain(const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo, const SURFACE_STATE *surface_state, const SWAPCHAIN_NODE *old_swapchain_state) const { // All physical devices and queue families are required to be able to present to any native window on Android; require the // application to have established support on any other platform. if (!instance_extensions.vk_khr_android_surface) { auto support_predicate = [this](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool { // TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device return (qs.first.gpu == physical_device) && qs.second; }; const auto &support = surface_state->gpu_queue_support; bool is_supported = std::any_of(support.begin(), support.end(), support_predicate); if (!is_supported) { if (LogError( device, "VUID-VkSwapchainCreateInfoKHR-surface-01270", "%s: pCreateInfo->surface is not known at this time to be supported for presentation by this device. The " "vkGetPhysicalDeviceSurfaceSupportKHR() must be called beforehand, and it must return VK_TRUE support with " "this surface for at least one queue family of this device.", func_name)) { return true; } } } if (old_swapchain_state) { if (old_swapchain_state->createInfo.surface != pCreateInfo->surface) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name)) { return true; } } if (old_swapchain_state->retired) { if (LogError(pCreateInfo->oldSwapchain, "VUID-VkSwapchainCreateInfoKHR-oldSwapchain-01933", "%s: pCreateInfo->oldSwapchain is retired", func_name)) { return true; } } } if ((pCreateInfo->imageExtent.width == 0) || (pCreateInfo->imageExtent.height == 0)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01689", "%s: pCreateInfo->imageExtent = (%d, %d) which is illegal.", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height)) { return true; } } auto physical_device_state = GetPhysicalDeviceState(); bool skip = false; VkSurfaceTransformFlagBitsKHR current_transform = physical_device_state->surfaceCapabilities.currentTransform; if ((pCreateInfo->preTransform & current_transform) != pCreateInfo->preTransform) { skip |= LogPerformanceWarning(physical_device, kVUID_Core_Swapchain_PreTransform, "%s: pCreateInfo->preTransform (%s) doesn't match the currentTransform (%s) returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR, the presentation engine will transform the image " "content as part of the presentation operation.", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform), string_VkSurfaceTransformFlagBitsKHR(current_transform)); } VkSurfaceCapabilitiesKHR capabilities{}; DispatchGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device_state->phys_device, pCreateInfo->surface, &capabilities); // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount: if (pCreateInfo->minImageCount < capabilities.minImageCount) { if (LogError(device, 
"VUID-VkSwapchainCreateInfoKHR-minImageCount-01271", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01272", "%s called with minImageCount = %d, which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d).", func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount)) { return true; } } // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent: if ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) || (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) || (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) || (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height)) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageExtent-01274", "%s called with imageExtent = (%d,%d), which is outside the bounds returned by " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), " "maxImageExtent = (%d,%d).", func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height, capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width, capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height)) { return true; } } // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedTransforms. if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) || !(pCreateInfo->preTransform & capabilities.supportedTransforms)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name, string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedTransforms) { const char *new_str = string_VkSurfaceTransformFlagBitsKHR(static_cast<VkSurfaceTransformFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-preTransform-01279", "%s.", error_string.c_str())) return true; } // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) || !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) { // This is an error situation; one for which we'd like to give the developer a helpful, multi-line error message. 
Build // it up a little at a time, and then log it: std::string error_string = ""; char str[1024]; // Here's the first part of the message: sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n", func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha)); error_string += str; for (int i = 0; i < 32; i++) { // Build up the rest of the message: if ((1 << i) & capabilities.supportedCompositeAlpha) { const char *new_str = string_VkCompositeAlphaFlagBitsKHR(static_cast<VkCompositeAlphaFlagBitsKHR>(1 << i)); sprintf(str, " %s\n", new_str); error_string += str; } } // Log the message that we've built up: if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-compositeAlpha-01280", "%s.", error_string.c_str())) return true; } // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers: if (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageArrayLayers-01275", "%s called with a non-supported imageArrayLayers (i.e. %d). Maximum value is %d.", func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers)) { return true; } } // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags: if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) { const char *validation_error = "VUID-VkSwapchainCreateInfoKHR-imageUsage-01276"; if ((IsExtEnabled(device_extensions.vk_khr_shared_presentable_image) == true) && ((pCreateInfo->presentMode == VK_PRESENT_MODE_IMMEDIATE_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_MAILBOX_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_KHR) || (pCreateInfo->presentMode == VK_PRESENT_MODE_FIFO_RELAXED_KHR))) { validation_error = "VUID-VkSwapchainCreateInfoKHR-presentMode-01427"; } if (LogError(device, validation_error, "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). 
Supported flag bits are 0x%08x.", func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags)) { return true; } } if (device_extensions.vk_khr_surface_protected_capabilities && (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR)) { VkPhysicalDeviceSurfaceInfo2KHR surface_info = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR}; surface_info.surface = pCreateInfo->surface; VkSurfaceProtectedCapabilitiesKHR surface_protected_capabilities = {VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR}; VkSurfaceCapabilities2KHR surface_capabilities = {VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR}; surface_capabilities.pNext = &surface_protected_capabilities; DispatchGetPhysicalDeviceSurfaceCapabilities2KHR(physical_device_state->phys_device, &surface_info, &surface_capabilities); if (!surface_protected_capabilities.supportsProtected) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-flags-03187", "%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR but the surface " "capabilities does not have VkSurfaceProtectedCapabilitiesKHR.supportsProtected set to VK_TRUE.", func_name)) { return true; } } } std::vector<VkSurfaceFormatKHR> surface_formats; const auto *surface_formats_ref = &surface_formats; // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR(): if (physical_device_state->surface_formats.empty()) { uint32_t surface_format_count = 0; DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, nullptr); surface_formats.resize(surface_format_count); DispatchGetPhysicalDeviceSurfaceFormatsKHR(physical_device, pCreateInfo->surface, &surface_format_count, &surface_formats[0]); } else { surface_formats_ref = &physical_device_state->surface_formats; } { // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format: bool found_format = false; bool found_color_space = false; bool found_match = false; for (const auto &format : *surface_formats_ref) { if (pCreateInfo->imageFormat == format.format) { // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace: found_format = true; if (pCreateInfo->imageColorSpace == format.colorSpace) { found_match = true; break; } } else { if (pCreateInfo->imageColorSpace == format.colorSpace) { found_color_space = true; } } } if (!found_match) { if (!found_format) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageFormat (%s).", func_name, string_VkFormat(pCreateInfo->imageFormat))) { return true; } } if (!found_color_space) { if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01273", "%s called with a non-supported pCreateInfo->imageColorSpace (%s).", func_name, string_VkColorSpaceKHR(pCreateInfo->imageColorSpace))) { return true; } } } } std::vector<VkPresentModeKHR> present_modes; const auto *present_modes_ref = &present_modes; // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR(): if (physical_device_state->present_modes.empty()) { uint32_t present_mode_count = 0; DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface, &present_mode_count, nullptr); present_modes.resize(present_mode_count); DispatchGetPhysicalDeviceSurfacePresentModesKHR(physical_device_state->phys_device, pCreateInfo->surface, &present_mode_count, &present_modes[0]); } else { present_modes_ref = &physical_device_state->present_modes; } // Validate pCreateInfo->presentMode 
against vkGetPhysicalDeviceSurfacePresentModesKHR():
    bool found_match =
        std::find(present_modes_ref->begin(), present_modes_ref->end(), pCreateInfo->presentMode) != present_modes_ref->end();
    if (!found_match) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-presentMode-01281",
                     "%s called with a non-supported presentMode (i.e. %s).", func_name,
                     string_VkPresentModeKHR(pCreateInfo->presentMode))) {
            return true;
        }
    }

    // Validate state for shared presentable case
    if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
        VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
        if (!device_extensions.vk_khr_shared_presentable_image) {
            if (LogError(
                    device, kVUID_Core_DrawState_ExtensionNotEnabled,
                    "%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
                    "been enabled.",
                    func_name, string_VkPresentModeKHR(pCreateInfo->presentMode))) {
                return true;
            }
        } else if (pCreateInfo->minImageCount != 1) {
            if (LogError(
                    device, "VUID-VkSwapchainCreateInfoKHR-minImageCount-01383",
                    "%s called with presentMode %s, but minImageCount value is %d. For shared presentable image, minImageCount "
                    "must be 1.",
                    func_name, string_VkPresentModeKHR(pCreateInfo->presentMode), pCreateInfo->minImageCount)) {
                return true;
            }
        }
    }

    if ((pCreateInfo->imageSharingMode == VK_SHARING_MODE_CONCURRENT) && pCreateInfo->pQueueFamilyIndices) {
        // Report queue family errors against the swapchain entry point being validated, not vkCreateBuffer.
        bool skip1 = ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
                                                         func_name, "pCreateInfo->pQueueFamilyIndices",
                                                         "VUID-VkSwapchainCreateInfoKHR-imageSharingMode-01428");
        if (skip1) return true;
    }

    // Validate pCreateInfo->imageUsage against GetPhysicalDeviceFormatProperties
    const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->imageFormat);
    const VkFormatFeatureFlags tiling_features = format_properties.optimalTilingFeatures;

    if (tiling_features == 0) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL has no supported format features on this "
                     "physical device.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_SAMPLED_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_SAMPLED_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_STORAGE_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_STORAGE_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((pCreateInfo->imageUsage &
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
               !(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    } else if ((pCreateInfo->imageUsage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
               !(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s: pCreateInfo->imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL does not support usage that includes "
                     "VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
                     func_name, string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    }

    const VkImageCreateInfo image_create_info = GetSwapchainImpliedImageCreateInfo(pCreateInfo);
    VkImageFormatProperties image_properties = {};
    const VkResult image_properties_result = DispatchGetPhysicalDeviceImageFormatProperties(
        physical_device, image_create_info.format, image_create_info.imageType, image_create_info.tiling,
        image_create_info.usage, image_create_info.flags, &image_properties);

    if (image_properties_result != VK_SUCCESS) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "vkGetPhysicalDeviceImageFormatProperties() unexpectedly failed, "
                     "when called for %s validation with following params: "
                     "format: %s, imageType: %s, "
                     "tiling: %s, usage: %s, "
                     "flags: %s.",
                     func_name, string_VkFormat(image_create_info.format), string_VkImageType(image_create_info.imageType),
                     string_VkImageTiling(image_create_info.tiling), string_VkImageUsageFlags(image_create_info.usage).c_str(),
                     string_VkImageCreateFlags(image_create_info.flags).c_str())) {
            return true;
        }
    }

    // Validate pCreateInfo->imageArrayLayers against VkImageFormatProperties::maxArrayLayers
    if (pCreateInfo->imageArrayLayers > image_properties.maxArrayLayers) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s called with a non-supported imageArrayLayers (i.e. %d). "
                     "Maximum value returned by vkGetPhysicalDeviceImageFormatProperties() is %d "
                     "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
                     func_name, pCreateInfo->imageArrayLayers, image_properties.maxArrayLayers,
                     string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    }

    // Validate pCreateInfo->imageExtent against VkImageFormatProperties::maxExtent
    if ((pCreateInfo->imageExtent.width > image_properties.maxExtent.width) ||
        (pCreateInfo->imageExtent.height > image_properties.maxExtent.height)) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-imageFormat-01778",
                     "%s called with imageExtent = (%d,%d), which is bigger than max extent (%d,%d) "
                     "returned by vkGetPhysicalDeviceImageFormatProperties(): "
                     "for imageFormat %s with tiling VK_IMAGE_TILING_OPTIMAL",
                     func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
                     image_properties.maxExtent.width, image_properties.maxExtent.height,
                     string_VkFormat(pCreateInfo->imageFormat))) {
            return true;
        }
    }

    if ((pCreateInfo->flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) &&
        device_group_create_info.physicalDeviceCount == 1) {
        if (LogError(device, "VUID-VkSwapchainCreateInfoKHR-physicalDeviceCount-01429",
                     "%s called with flags containing VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR "
                     "but logical device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount equal to 1",
                     func_name)) {
            return true;
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) const {
    const auto surface_state = GetSurfaceState(pCreateInfo->surface);
    const auto old_swapchain_state = GetSwapchainState(pCreateInfo->oldSwapchain);
    return ValidateCreateSwapchain("vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}

void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
                                                  const VkAllocationCallbacks *pAllocator) {
    if (swapchain) {
        auto swapchain_data = GetSwapchainState(swapchain);
        if (swapchain_data) {
            for (const auto &swapchain_image : swapchain_data->images) {
                if (!swapchain_image.image_state) continue;
                imageLayoutMap.erase(swapchain_image.image_state->image());
                qfo_release_image_barrier_map.erase(swapchain_image.image_state->image());
            }
        }
    }
    StateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator);
}

void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages, VkResult result) {
    // This function runs twice: first to retrieve pSwapchainImageCount, then to retrieve pSwapchainImages.
    // On the first call, StateTracker::PostCallRecordGetSwapchainImagesKHR only sizes the image container.
    // On the second call, it creates the VkImage handles and their IMAGE_STATE entries, so recording the
    // new IMAGE_STATEs into the image layout map has to happen on the second call -- when pSwapchainImages
    // is not nullptr -- and only after StateTracker::PostCallRecordGetSwapchainImagesKHR has run.
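    // For reference, a minimal sketch (illustrative only; assumes valid `device` and `swapchain` handles
    // and omits VkResult checking) of the application-side count-then-fetch idiom that produces the two
    // calls described above:
    //
    //     uint32_t count = 0;
    //     vkGetSwapchainImagesKHR(device, swapchain, &count, nullptr);         // 1st call: count only
    //     std::vector<VkImage> images(count);
    //     vkGetSwapchainImagesKHR(device, swapchain, &count, images.data());   // 2nd call: fills images
    //
    // Only the second call reaches this hook with a non-null pSwapchainImages, which is when the newly
    // created IMAGE_STATEs can be recorded into imageLayoutMap below.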
    uint32_t new_swapchain_image_index = 0;
    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        auto swapchain_state = GetSwapchainState(swapchain);
        const auto image_vector_size = swapchain_state->images.size();
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            if ((new_swapchain_image_index >= image_vector_size) ||
                !swapchain_state->images[new_swapchain_image_index].image_state) {
                break;
            }
        }
    }
    StateTracker::PostCallRecordGetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages, result);

    if (((result == VK_SUCCESS) || (result == VK_INCOMPLETE)) && pSwapchainImages) {
        for (; new_swapchain_image_index < *pSwapchainImageCount; ++new_swapchain_image_index) {
            auto image_state = Get<IMAGE_STATE>(pSwapchainImages[new_swapchain_image_index]);
            AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
        }
    }
}

bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) const {
    bool skip = false;
    const auto queue_state = GetQueueState(queue);

    for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
        const auto semaphore_state = GetSemaphoreState(pPresentInfo->pWaitSemaphores[i]);
        if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
            skip |= LogError(pPresentInfo->pWaitSemaphores[i], "VUID-vkQueuePresentKHR-pWaitSemaphores-03267",
                             "vkQueuePresentKHR: pWaitSemaphores[%u] (%s) is not a VK_SEMAPHORE_TYPE_BINARY", i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
        if (semaphore_state && !semaphore_state->signaled && !SemaphoreWasSignaled(pPresentInfo->pWaitSemaphores[i])) {
            LogObjectList objlist(queue);
            objlist.add(pPresentInfo->pWaitSemaphores[i]);
            skip |= LogError(objlist, "VUID-vkQueuePresentKHR-pWaitSemaphores-03268",
                             "vkQueuePresentKHR: Queue %s is waiting on pWaitSemaphores[%u] (%s) that has no way to be signaled.",
                             report_data->FormatHandle(queue).c_str(), i,
                             report_data->FormatHandle(pPresentInfo->pWaitSemaphores[i]).c_str());
        }
    }

    for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
        const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
        if (swapchain_data) {
            // VU currently is 2-in-1, covers being a valid index and valid layout
            const char *validation_error = (device_extensions.vk_khr_shared_presentable_image)
                                               ? "VUID-VkPresentInfoKHR-pImageIndices-01430"
                                               : "VUID-VkPresentInfoKHR-pImageIndices-01296";
            // Check if index is even possible to be acquired to give better error message
            if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
                skip |= LogError(
                    pPresentInfo->pSwapchains[i], validation_error,
                    "vkQueuePresentKHR: pSwapchains[%u] image index is too large (%u). There are only %u images in this swapchain.",
                    i, pPresentInfo->pImageIndices[i], static_cast<uint32_t>(swapchain_data->images.size()));
            } else {
                const auto *image_state = swapchain_data->images[pPresentInfo->pImageIndices[i]].image_state;
                assert(image_state);

                if (!image_state->acquired) {
                    skip |= LogError(pPresentInfo->pSwapchains[i], validation_error,
                                     "vkQueuePresentKHR: pSwapchains[%u] image index %u has not been acquired.", i,
                                     pPresentInfo->pImageIndices[i]);
                }

                vector<VkImageLayout> layouts;
                if (FindLayouts(*image_state, layouts)) {
                    for (auto layout : layouts) {
                        if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) && (!device_extensions.vk_khr_shared_presentable_image ||
                                                                            (layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
                            skip |= LogError(queue, validation_error,
                                             "vkQueuePresentKHR(): pSwapchains[%u] images passed to present must be in layout "
                                             "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR or "
                                             "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR but is in %s.",
                                             i, string_VkImageLayout(layout));
                        }
                    }
                }
            }

            // All physical devices and queue families are required to be able to present to any native window on Android; require
            // the application to have established support on any other platform.
            if (!instance_extensions.vk_khr_android_surface) {
                const auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
                auto support_it = surface_state->gpu_queue_support.find({physical_device, queue_state->queueFamilyIndex});

                if (support_it == surface_state->gpu_queue_support.end()) {
                    skip |= LogError(
                        pPresentInfo->pSwapchains[i], kVUID_Core_DrawState_SwapchainUnsupportedQueue,
                        "vkQueuePresentKHR: Presenting pSwapchains[%u] image without calling vkGetPhysicalDeviceSurfaceSupportKHR",
                        i);
                } else if (!support_it->second) {
                    skip |= LogError(
                        pPresentInfo->pSwapchains[i], "VUID-vkQueuePresentKHR-pSwapchains-01292",
                        "vkQueuePresentKHR: Presenting pSwapchains[%u] image on queue that cannot present to this surface.", i);
                }
            }
        }
    }

    if (pPresentInfo->pNext) {
        // Verify ext struct
        const auto *present_regions = LvlFindInChain<VkPresentRegionsKHR>(pPresentInfo->pNext);
        if (present_regions) {
            for (uint32_t i = 0; i < present_regions->swapchainCount; ++i) {
                const auto swapchain_data = GetSwapchainState(pPresentInfo->pSwapchains[i]);
                assert(swapchain_data);
                VkPresentRegionKHR region = present_regions->pRegions[i];
                for (uint32_t j = 0; j < region.rectangleCount; ++j) {
                    VkRectLayerKHR rect = region.pRectangles[j];
                    // Swap offsets and extents for 90 or 270 degree preTransform rotation
                    if (swapchain_data->createInfo.preTransform &
                        (VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR | VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR |
                         VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR |
                         VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR)) {
                        std::swap(rect.offset.x, rect.offset.y);
                        std::swap(rect.extent.width, rect.extent.height);
                    }
                    if ((rect.offset.x + rect.extent.width) > swapchain_data->createInfo.imageExtent.width) {
                        skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
                                         "the sum of offset.x (%i) and extent.width (%i) after applying preTransform (%s) is greater "
                                         "than the corresponding swapchain's imageExtent.width (%i).",
                                         i, j, rect.offset.x, rect.extent.width,
                                         string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
                                         swapchain_data->createInfo.imageExtent.width);
                    }
                    if ((rect.offset.y + rect.extent.height) > swapchain_data->createInfo.imageExtent.height) {
                        skip |= LogError(pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-offset-04864",
                                         "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], "
                                         "the sum of offset.y (%i) and extent.height (%i) after applying preTransform (%s) is greater "
                                         "than the corresponding swapchain's imageExtent.height (%i).",
                                         i, j, rect.offset.y, rect.extent.height,
                                         string_VkSurfaceTransformFlagBitsKHR(swapchain_data->createInfo.preTransform),
                                         swapchain_data->createInfo.imageExtent.height);
                    }
                    // Layers are zero-indexed, so rect.layer must be strictly less than imageArrayLayers.
                    if (rect.layer >= swapchain_data->createInfo.imageArrayLayers) {
                        skip |= LogError(
                            pPresentInfo->pSwapchains[i], "VUID-VkRectLayerKHR-layer-01262",
                            "vkQueuePresentKHR(): For VkPresentRegionKHR down pNext chain, pRegion[%i].pRectangles[%i], the layer "
                            "(%i) is not less than the corresponding swapchain's imageArrayLayers (%i).",
                            i, j, rect.layer, swapchain_data->createInfo.imageArrayLayers);
                    }
                }
            }
        }

        const auto *present_times_info = LvlFindInChain<VkPresentTimesInfoGOOGLE>(pPresentInfo->pNext);
        if (present_times_info) {
            if (pPresentInfo->swapchainCount != present_times_info->swapchainCount) {
                skip |= LogError(pPresentInfo->pSwapchains[0], "VUID-VkPresentTimesInfoGOOGLE-swapchainCount-01247",
                                 "vkQueuePresentKHR(): VkPresentTimesInfoGOOGLE.swapchainCount is %i but pPresentInfo->swapchainCount "
                                 "is %i. For VkPresentTimesInfoGOOGLE down pNext chain of VkPresentInfoKHR, "
                                 "VkPresentTimesInfoGOOGLE.swapchainCount must equal VkPresentInfoKHR.swapchainCount.",
                                 present_times_info->swapchainCount, pPresentInfo->swapchainCount);
            }
        }
    }

    return skip;
}

bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                          const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                          const VkAllocationCallbacks *pAllocator,
                                                          VkSwapchainKHR *pSwapchains) const {
    bool skip = false;
    if (pCreateInfos) {
        for (uint32_t i = 0; i < swapchainCount; i++) {
            const auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
            const auto old_swapchain_state = GetSwapchainState(pCreateInfos[i].oldSwapchain);
            std::stringstream func_name;
            func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
            skip |= ValidateCreateSwapchain(func_name.str().c_str(), &pCreateInfos[i], surface_state, old_swapchain_state);
        }
    }
    return skip;
}

bool CoreChecks::ValidateAcquireNextImage(VkDevice device, const CommandVersion cmd_version, VkSwapchainKHR swapchain,
                                          uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex,
                                          const char *func_name, const char *semaphore_type_vuid) const {
    bool skip = false;

    auto semaphore_state = GetSemaphoreState(semaphore);
    if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_BINARY) {
        skip |= LogError(semaphore, semaphore_type_vuid, "%s: %s is not a VK_SEMAPHORE_TYPE_BINARY", func_name,
                         report_data->FormatHandle(semaphore).c_str());
    }
    if (semaphore_state && semaphore_state->scope == kSyncScopeInternal && semaphore_state->signaled) {
        skip |= LogError(semaphore, "VUID-vkAcquireNextImageKHR-semaphore-01286",
                         "%s: Semaphore must not be currently signaled or in a wait state.", func_name);
    }

    auto fence_state = GetFenceState(fence);
    if (fence_state) {
        skip |= ValidateFenceForSubmit(fence_state, "VUID-vkAcquireNextImageKHR-fence-01287",
                                       "VUID-vkAcquireNextImageKHR-fence-01287", "vkAcquireNextImageKHR()");
    }

    const auto swapchain_data = GetSwapchainState(swapchain);
    if (swapchain_data) {
        if (swapchain_data->retired) {
            skip |= LogError(swapchain, "VUID-vkAcquireNextImageKHR-swapchain-01285",
                             "%s: This swapchain has been retired. 
The application can still present any images it " "has acquired, but cannot acquire any more.", func_name); } auto physical_device_state = GetPhysicalDeviceState(); // TODO: this is technically wrong on many levels, but requires massive cleanup if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHR_called) { const uint32_t acquired_images = static_cast<uint32_t>( std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(), [](const SWAPCHAIN_IMAGE &image) { return (image.image_state && image.image_state->acquired); })); const uint32_t swapchain_image_count = static_cast<uint32_t>(swapchain_data->images.size()); const auto min_image_count = physical_device_state->surfaceCapabilities.minImageCount; const bool too_many_already_acquired = acquired_images > swapchain_image_count - min_image_count; if (timeout == UINT64_MAX && too_many_already_acquired) { const char *vuid = "INVALID-vuid"; if (cmd_version == CMD_VERSION_1) { vuid = "VUID-vkAcquireNextImageKHR-swapchain-01802"; } else if (cmd_version == CMD_VERSION_2) { vuid = "VUID-vkAcquireNextImage2KHR-swapchain-01803"; } else { assert(false); } const uint32_t acquirable = swapchain_image_count - min_image_count + 1; skip |= LogError(swapchain, vuid, "%s: Application has already previously acquired %" PRIu32 " image%s from swapchain. Only %" PRIu32 " %s available to be acquired using a timeout of UINT64_MAX (given the swapchain has %" PRIu32 ", and VkSurfaceCapabilitiesKHR::minImageCount is %" PRIu32 ").", func_name, acquired_images, acquired_images > 1 ? "s" : "", acquirable, acquirable > 1 ? "are" : "is", swapchain_image_count, min_image_count); } } } return skip; } bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) const { return ValidateAcquireNextImage(device, CMD_VERSION_1, swapchain, timeout, semaphore, fence, pImageIndex, "vkAcquireNextImageKHR", "VUID-vkAcquireNextImageKHR-semaphore-03265"); } bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) const { bool skip = false; skip |= ValidateDeviceMaskToPhysicalDeviceCount(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01290"); skip |= ValidateDeviceMaskToZero(pAcquireInfo->deviceMask, pAcquireInfo->swapchain, "VUID-VkAcquireNextImageInfoKHR-deviceMask-01291"); skip |= ValidateAcquireNextImage(device, CMD_VERSION_2, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore, pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR", "VUID-VkAcquireNextImageInfoKHR-semaphore-03266"); return skip; } bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) const { const auto surface_state = GetSurfaceState(surface); bool skip = false; if ((surface_state) && (surface_state->swapchain)) { skip |= LogError(instance, "VUID-vkDestroySurfaceKHR-surface-01266", "vkDestroySurfaceKHR() called before its associated VkSwapchainKHR was destroyed."); } return skip; } #ifdef VK_USE_PLATFORM_WAYLAND_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display *display) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, 
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306", "vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WAYLAND_KHR #ifdef VK_USE_PLATFORM_WIN32_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309", "vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_WIN32_KHR #ifdef VK_USE_PLATFORM_XCB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t *connection, xcb_visualid_t visual_id) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312", "vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XCB_KHR #ifdef VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display *dpy, VisualID visualID) const { const auto pd_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(pd_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315", "vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex"); } #endif // VK_USE_PLATFORM_XLIB_KHR bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32 *pSupported) const { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); return ValidateQueueFamilyIndex(physical_device_state, queueFamilyIndex, "VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269", "vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex"); } bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo) const { bool skip = false; const auto layout = GetDescriptorSetLayoutShared(pCreateInfo->descriptorSetLayout); if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) { skip |= LogError(pCreateInfo->descriptorSetLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350", "%s: Invalid pCreateInfo->descriptorSetLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->descriptorSetLayout).c_str()); } else if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR == pCreateInfo->templateType) { auto bind_point = pCreateInfo->pipelineBindPoint; bool valid_bp = (bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS) || (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) || (bind_point == VK_PIPELINE_BIND_POINT_RAY_TRACING_NV); if (!valid_bp) { skip |= LogError(device, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351", "%s: Invalid pCreateInfo->pipelineBindPoint (%" PRIu32 ").", func_name, static_cast<uint32_t>(bind_point)); } const auto pipeline_layout = GetPipelineLayout(pCreateInfo->pipelineLayout); if (!pipeline_layout) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352", "%s: Invalid 
pCreateInfo->pipelineLayout (%s)", func_name, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } else { const uint32_t pd_set = pCreateInfo->set; if ((pd_set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[pd_set] || !pipeline_layout->set_layouts[pd_set]->IsPushDescriptor()) { skip |= LogError(pCreateInfo->pipelineLayout, "VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353", "%s: pCreateInfo->set (%" PRIu32 ") does not refer to the push descriptor set layout for pCreateInfo->pipelineLayout (%s).", func_name, pd_set, report_data->FormatHandle(pCreateInfo->pipelineLayout).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", pCreateInfo); return skip; } bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkDescriptorUpdateTemplate *pDescriptorUpdateTemplate) const { bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", pCreateInfo); return skip; } bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { bool skip = false; auto const template_map_entry = desc_template_map.find(descriptorUpdateTemplate); if ((template_map_entry == desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) { // Object tracker will report errors for invalid descriptorUpdateTemplate values, avoiding a crash in release builds // but retaining the assert as template support is new enough to want to investigate these in debug builds. assert(0); } else { const TEMPLATE_STATE *template_state = template_map_entry->second.get(); // TODO: Validate template push descriptor updates if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) { skip = ValidateUpdateDescriptorSetsWithTemplateKHR(descriptorSet, template_state, pData); } } return skip; } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) const { return ValidateUpdateDescriptorSetWithTemplate(descriptorSet, descriptorUpdateTemplate, pData); } bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void *pData) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *const func_name = "vkPushDescriptorSetWithTemplateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_PUSHDESCRIPTORSETWITHTEMPLATEKHR, func_name); const auto layout_data = GetPipelineLayout(layout); const auto dsl = layout_data ? 
layout_data->GetDsl(set) : nullptr; // Validate the set index points to a push descriptor set and is in range if (dsl) { if (!dsl->IsPushDescriptor()) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00365", "%s: Set index %" PRIu32 " does not match push descriptor set layout index for %s.", func_name, set, report_data->FormatHandle(layout).c_str()); } } else if (layout_data && (set >= layout_data->set_layouts.size())) { skip = LogError(layout, "VUID-vkCmdPushDescriptorSetKHR-set-00364", "%s: Set index %" PRIu32 " is outside of range for %s (set < %" PRIu32 ").", func_name, set, report_data->FormatHandle(layout).c_str(), static_cast<uint32_t>(layout_data->set_layouts.size())); } const auto template_state = GetDescriptorTemplateState(descriptorUpdateTemplate); if (template_state) { const auto &template_ci = template_state->create_info; static const std::map<VkPipelineBindPoint, std::string> bind_errors = { std::make_pair(VK_PIPELINE_BIND_POINT_GRAPHICS, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_COMPUTE, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366"), std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, "VUID-vkCmdPushDescriptorSetWithTemplateKHR-commandBuffer-00366")}; skip |= ValidatePipelineBindPoint(cb_state, template_ci.pipelineBindPoint, func_name, bind_errors); if (template_ci.templateType != VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR) { skip |= LogError(cb_state->commandBuffer(), kVUID_Core_PushDescriptorUpdate_TemplateType, "%s: descriptorUpdateTemplate %s was not created with flag " "VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR.", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str()); } if (template_ci.set != set) { skip |= LogError(cb_state->commandBuffer(), kVUID_Core_PushDescriptorUpdate_Template_SetMismatched, "%s: descriptorUpdateTemplate %s created with set %" PRIu32 " does not match command parameter set %" PRIu32 ".", func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), template_ci.set, set); } if (!CompatForSet(set, layout_data, GetPipelineLayout(template_ci.pipelineLayout))) { LogObjectList objlist(cb_state->commandBuffer()); objlist.add(descriptorUpdateTemplate); objlist.add(template_ci.pipelineLayout); objlist.add(layout); skip |= LogError(objlist, kVUID_Core_PushDescriptorUpdate_Template_LayoutMismatched, "%s: descriptorUpdateTemplate %s created with %s is incompatible with command parameter " "%s for set %" PRIu32, func_name, report_data->FormatHandle(descriptorUpdateTemplate).c_str(), report_data->FormatHandle(template_ci.pipelineLayout).c_str(), report_data->FormatHandle(layout).c_str(), set); } } if (dsl && template_state) { // Create an empty proxy in order to use the existing descriptor set update validation cvdescriptorset::DescriptorSet proxy_ds(VK_NULL_HANDLE, nullptr, dsl, 0, this); // Decode the template into a set of write updates cvdescriptorset::DecodedTemplateUpdate decoded_template(this, VK_NULL_HANDLE, template_state, pData, dsl->GetDescriptorSetLayout()); // Validate the decoded update against the proxy_ds skip |= ValidatePushDescriptorsUpdate(&proxy_ds, static_cast<uint32_t>(decoded_template.desc_writes.size()), decoded_template.desc_writes.data(), func_name); } return skip; } bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, uint32_t planeIndex, const char *api_name) const { bool skip = false; const auto physical_device_state = 
GetPhysicalDeviceState(physicalDevice);
    if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) {
        if (planeIndex >= physical_device_state->display_plane_property_count) {
            skip |= LogError(physicalDevice, "VUID-vkGetDisplayPlaneSupportedDisplaysKHR-planeIndex-01249",
                             "%s(): planeIndex (%u) must be in the range [0, %d] that was returned by "
                             "vkGetPhysicalDeviceDisplayPlanePropertiesKHR "
                             "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?",
                             api_name, planeIndex, physical_device_state->display_plane_property_count - 1);
        }
    }
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                    uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) const {
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneSupportedDisplaysKHR");
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                               uint32_t planeIndex,
                                                               VkDisplayPlaneCapabilitiesKHR *pCapabilities) const {
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, planeIndex,
                                                                    "vkGetDisplayPlaneCapabilitiesKHR");
    return skip;
}

bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
                                                                const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
                                                                VkDisplayPlaneCapabilities2KHR *pCapabilities) const {
    bool skip = false;
    skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, pDisplayPlaneInfo->planeIndex,
                                                                    "vkGetDisplayPlaneCapabilities2KHR");
    return skip;
}

bool CoreChecks::PreCallValidateCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                             const VkAllocationCallbacks *pAllocator,
                                                             VkSurfaceKHR *pSurface) const {
    bool skip = false;
    const VkDisplayModeKHR display_mode = pCreateInfo->displayMode;
    const uint32_t plane_index = pCreateInfo->planeIndex;

    if (pCreateInfo->alphaMode == VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR) {
        const float global_alpha = pCreateInfo->globalAlpha;
        if ((global_alpha > 1.0f) || (global_alpha < 0.0f)) {
            skip |= LogError(
                display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01254",
                "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR but globalAlpha is %f.",
                global_alpha);
        }
    }

    const DISPLAY_MODE_STATE *dm_state = GetDisplayModeState(display_mode);
    if (dm_state != nullptr) {
        // Get physical device from VkDisplayModeKHR state tracking
        const VkPhysicalDevice physical_device = dm_state->physical_device;
        const auto physical_device_state = GetPhysicalDeviceState(physical_device);
        VkPhysicalDeviceProperties device_properties = {};
        DispatchGetPhysicalDeviceProperties(physical_device, &device_properties);

        const uint32_t width = pCreateInfo->imageExtent.width;
        const uint32_t height = pCreateInfo->imageExtent.height;
        // The limit itself is a supported dimension, so only values strictly above it are flagged.
        if (width > device_properties.limits.maxImageDimension2D) {
            skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
                             "vkCreateDisplayPlaneSurfaceKHR(): width (%" PRIu32
                             ") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
                             width, device_properties.limits.maxImageDimension2D);
        }
        if (height > device_properties.limits.maxImageDimension2D) {
            skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-width-01256",
                             "vkCreateDisplayPlaneSurfaceKHR(): height (%" PRIu32
                             ") exceeds device limit maxImageDimension2D (%" PRIu32 ").",
                             height,
device_properties.limits.maxImageDimension2D); } if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHR_called) { if (plane_index >= physical_device_state->display_plane_property_count) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-planeIndex-01252", "vkCreateDisplayPlaneSurfaceKHR(): planeIndex (%u) must be in the range [0, %d] that was returned by " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR " "or vkGetPhysicalDeviceDisplayPlaneProperties2KHR. Do you have the plane index hardcoded?", plane_index, physical_device_state->display_plane_property_count - 1); } else { // call here once we know the plane index used is a valid plane index VkDisplayPlaneCapabilitiesKHR plane_capabilities; DispatchGetDisplayPlaneCapabilitiesKHR(physical_device, display_mode, plane_index, &plane_capabilities); if ((pCreateInfo->alphaMode & plane_capabilities.supportedAlpha) == 0) { skip |= LogError(display_mode, "VUID-VkDisplaySurfaceCreateInfoKHR-alphaMode-01255", "vkCreateDisplayPlaneSurfaceKHR(): alphaMode is %s but planeIndex %u supportedAlpha (0x%x) " "does not support the mode.", string_VkDisplayPlaneAlphaFlagBitsKHR(pCreateInfo->alphaMode), plane_index, plane_capabilities.supportedAlpha); } } } } return skip; } bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()"); } bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); return ValidateCmd(cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()"); } bool CoreChecks::PreCallValidateCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) const { if (disabled[query_validation]) return false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); QueryObject query_obj(queryPool, query, index); const char *cmd_name = "vkCmdBeginQueryIndexedEXT()"; struct BeginQueryIndexedVuids : ValidateBeginQueryVuids { BeginQueryIndexedVuids() : ValidateBeginQueryVuids() { vuid_queue_flags = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-cmdpool"; vuid_queue_feedback = "VUID-vkCmdBeginQueryIndexedEXT-queryType-02338"; vuid_queue_occlusion = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00803"; vuid_precise = "VUID-vkCmdBeginQueryIndexedEXT-queryType-00800"; vuid_query_count = "VUID-vkCmdBeginQueryIndexedEXT-query-00802"; vuid_profile_lock = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03223"; vuid_scope_not_first = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03224"; vuid_scope_in_rp = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-03225"; vuid_dup_query_type = "VUID-vkCmdBeginQueryIndexedEXT-queryPool-04753"; vuid_protected_cb = "VUID-vkCmdBeginQueryIndexedEXT-commandBuffer-01885"; } }; BeginQueryIndexedVuids vuids; bool skip = ValidateBeginQuery(cb_state, query_obj, flags, index, CMD_BEGINQUERYINDEXEDEXT, cmd_name, &vuids); // Extension specific VU's const auto &query_pool_ci = GetQueryPoolState(query_obj.pool)->createInfo; if (query_pool_ci.queryType == VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT) { if (device_extensions.vk_ext_transform_feedback && (index >= phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams)) { skip |= LogError( cb_state->commandBuffer(), 
"VUID-vkCmdBeginQueryIndexedEXT-queryType-02339", "%s: index %" PRIu32 " must be less than VkPhysicalDeviceTransformFeedbackPropertiesEXT::maxTransformFeedbackStreams %" PRIu32 ".", cmd_name, index, phys_dev_ext_props.transform_feedback_props.maxTransformFeedbackStreams); } } else if (index != 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdBeginQueryIndexedEXT-queryType-02340", "%s: index %" PRIu32 " must be zero if %s was not created with type VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT.", cmd_name, index, report_data->FormatHandle(queryPool).c_str()); } return skip; } void CoreChecks::PreCallRecordCmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index) { if (disabled[query_validation]) return; QueryObject query_obj = {queryPool, query, index}; EnqueueVerifyBeginQuery(commandBuffer, query_obj, "vkCmdBeginQueryIndexedEXT()"); } void CoreChecks::PreCallRecordCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) { if (disabled[query_validation]) return; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); QueryObject query_obj = {queryPool, query, index}; query_obj.endCommandIndex = cb_state->commandCount - 1; EnqueueVerifyEndQuery(commandBuffer, query_obj); } bool CoreChecks::PreCallValidateCmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index) const { if (disabled[query_validation]) return false; QueryObject query_obj = {queryPool, query, index}; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); struct EndQueryIndexedVuids : ValidateEndQueryVuids { EndQueryIndexedVuids() : ValidateEndQueryVuids() { vuid_queue_flags = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-cmdpool"; vuid_active_queries = "VUID-vkCmdEndQueryIndexedEXT-None-02342"; vuid_protected_cb = "VUID-vkCmdEndQueryIndexedEXT-commandBuffer-02344"; } }; EndQueryIndexedVuids vuids; return ValidateCmdEndQuery(cb_state, query_obj, index, CMD_ENDQUERYINDEXEDEXT, "vkCmdEndQueryIndexedEXT()", &vuids); } bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; // Minimal validation for command buffer state skip |= ValidateCmd(cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()"); skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetDiscardRectangleEXT-viewportScissor2D-04788", "vkCmdSetDiscardRectangleEXT"); return skip; } bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); // Minimal validation for command buffer state skip |= ValidateCmd(cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()"); skip |= ValidateSampleLocationsInfo(pSampleLocationsInfo, "vkCmdSetSampleLocationsEXT"); const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto *pipe = cb_state->lastBound[lv_bind_point].pipeline_state; if (pipe != nullptr) { // Check same error with different log messages const safe_VkPipelineMultisampleStateCreateInfo *multisample_state = pipe->graphicsPipelineCI.pMultisampleState; if (multisample_state == nullptr) { skip |= 
LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel must be equal to " "rasterizationSamples, but the bound graphics pipeline was created without a multisample state"); } else if (multisample_state->rasterizationSamples != pSampleLocationsInfo->sampleLocationsPerPixel) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetSampleLocationsEXT-sampleLocationsPerPixel-01529", "vkCmdSetSampleLocationsEXT(): pSampleLocationsInfo->sampleLocationsPerPixel (%s) is not equal to " "the last bound pipeline's rasterizationSamples (%s)", string_VkSampleCountFlagBits(pSampleLocationsInfo->sampleLocationsPerPixel), string_VkSampleCountFlagBits(multisample_state->rasterizationSamples)); } } return skip; } bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const char *func_name, const VkSamplerYcbcrConversionCreateInfo *create_info) const { bool skip = false; const VkFormat conversion_format = create_info->format; // Need to check for external format conversion first as it allows for non-UNORM format bool external_format = false; #ifdef VK_USE_PLATFORM_ANDROID_KHR const VkExternalFormatANDROID *ext_format_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext); if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) { external_format = true; if (VK_FORMAT_UNDEFINED != create_info->format) { return LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01904", "%s: CreateInfo format is not VK_FORMAT_UNDEFINED while " "there is a chained VkExternalFormatANDROID struct with a non-zero externalFormat.", func_name); } } #endif // VK_USE_PLATFORM_ANDROID_KHR if ((external_format == false) && (FormatIsUNorm(conversion_format) == false)) { const char *vuid = (device_extensions.vk_android_external_memory_android_hardware_buffer) ? 
"VUID-VkSamplerYcbcrConversionCreateInfo-format-04061" : "VUID-VkSamplerYcbcrConversionCreateInfo-format-04060"; skip |= LogError(device, vuid, "%s: CreateInfo format (%s) is not an UNORM format and there is no external format conversion being created.", func_name, string_VkFormat(conversion_format)); } // Gets VkFormatFeatureFlags according to Sampler Ycbcr Conversion Format Features // (vkspec.html#potential-format-features) VkFormatFeatureFlags format_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM; if (conversion_format == VK_FORMAT_UNDEFINED) { #ifdef VK_USE_PLATFORM_ANDROID_KHR // only check for external format inside VK_FORMAT_UNDEFINED check to prevent unnecessary extra errors from no format // features being supported if (external_format == true) { auto it = ahb_ext_formats_map.find(ext_format_android->externalFormat); if (it != ahb_ext_formats_map.end()) { format_features = it->second; } } #endif // VK_USE_PLATFORM_ANDROID_KHR } else { format_features = GetPotentialFormatFeatures(conversion_format); } // Check all VUID that are based off of VkFormatFeatureFlags // These can't be in StatelessValidation due to needing possible External AHB state for feature support if (((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) && ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-format-01650", "%s: Format %s does not support either VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or " "VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT", func_name, string_VkFormat(conversion_format)); } if ((format_features & VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_COSITED_EVEN) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01651", "%s: Format %s does not support VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_COSITED_EVEN", func_name, string_VkFormat(conversion_format)); } } if ((format_features & VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT) == 0) { if (FormatIsXChromaSubsampled(conversion_format) && create_info->xChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so xChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } if (FormatIsYChromaSubsampled(conversion_format) && create_info->yChromaOffset == VK_CHROMA_LOCATION_MIDPOINT) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-xChromaOffset-01652", "%s: Format %s does not support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT so yChromaOffset can't " "be VK_CHROMA_LOCATION_MIDPOINT", func_name, string_VkFormat(conversion_format)); } } if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT) == 0) && (create_info->forceExplicitReconstruction == VK_TRUE)) { skip |= LogError(device, 
"VUID-VkSamplerYcbcrConversionCreateInfo-forceExplicitReconstruction-01656", "%s: Format %s does not support " "VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT so " "forceExplicitReconstruction must be VK_FALSE", func_name, string_VkFormat(conversion_format)); } if (((format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT) == 0) && (create_info->chromaFilter == VK_FILTER_LINEAR)) { skip |= LogError(device, "VUID-VkSamplerYcbcrConversionCreateInfo-chromaFilter-01657", "%s: Format %s does not support VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT so " "chromaFilter must not be VK_FILTER_LINEAR", func_name, string_VkFormat(conversion_format)); } return skip; } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversion()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion) const { return ValidateCreateSamplerYcbcrConversion("vkCreateSamplerYcbcrConversionKHR()", pCreateInfo); } bool CoreChecks::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) const { bool skip = false; if (samplerMap.size() >= phys_dev_props.limits.maxSamplerAllocationCount) { skip |= LogError( device, "VUID-vkCreateSampler-maxSamplerAllocationCount-04110", "vkCreateSampler(): Number of currently valid sampler objects (%zu) is not less than the maximum allowed (%u).", samplerMap.size(), phys_dev_props.limits.maxSamplerAllocationCount); } if (enabled_features.core11.samplerYcbcrConversion == VK_TRUE) { const VkSamplerYcbcrConversionInfo *conversion_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(pCreateInfo->pNext); if (conversion_info != nullptr) { const VkSamplerYcbcrConversion sampler_ycbcr_conversion = conversion_info->conversion; const SAMPLER_YCBCR_CONVERSION_STATE *ycbcr_state = GetSamplerYcbcrConversionState(sampler_ycbcr_conversion); if ((ycbcr_state->format_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT) == 0) { const VkFilter chroma_filter = ycbcr_state->chromaFilter; if (pCreateInfo->minFilter != chroma_filter) { skip |= LogError( device, "VUID-VkSamplerCreateInfo-minFilter-01645", "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is " "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to " "chromaFilter (%s)", report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format), string_VkFilter(pCreateInfo->minFilter), string_VkFilter(chroma_filter)); } if (pCreateInfo->magFilter != chroma_filter) { skip |= LogError( device, "VUID-VkSamplerCreateInfo-minFilter-01645", "VkCreateSampler: VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT is " "not supported for SamplerYcbcrConversion's (%s) format %s so minFilter (%s) needs to be equal to " "chromaFilter (%s)", report_data->FormatHandle(sampler_ycbcr_conversion).c_str(), string_VkFormat(ycbcr_state->format), string_VkFilter(pCreateInfo->minFilter), 
string_VkFilter(chroma_filter)); } }
// At this point there is a known sampler YCbCr conversion enabled
const auto *sampler_reduction = LvlFindInChain<VkSamplerReductionModeCreateInfo>(pCreateInfo->pNext); if (sampler_reduction != nullptr) { if (sampler_reduction->reductionMode != VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-01647", "A sampler YCbCr Conversion is being used when creating this sampler so the sampler reduction mode " "must be VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE."); } } } } if (pCreateInfo->borderColor == VK_BORDER_COLOR_INT_CUSTOM_EXT || pCreateInfo->borderColor == VK_BORDER_COLOR_FLOAT_CUSTOM_EXT) { if (!enabled_features.custom_border_color_features.customBorderColors) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-customBorderColors-04085", "vkCreateSampler(): A custom border color was specified without enabling the custom border color feature"); } auto custom_create_info = LvlFindInChain<VkSamplerCustomBorderColorCreateInfoEXT>(pCreateInfo->pNext); if (custom_create_info) { if (custom_create_info->format == VK_FORMAT_UNDEFINED && !enabled_features.custom_border_color_features.customBorderColorWithoutFormat) { skip |= LogError(device, "VUID-VkSamplerCustomBorderColorCreateInfoEXT-format-04014", "vkCreateSampler(): A custom border color was specified as VK_FORMAT_UNDEFINED without the " "customBorderColorWithoutFormat feature being enabled"); } } if (custom_border_color_sampler_count >= phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-None-04012", "vkCreateSampler(): Creating a sampler with a custom border color will exceed the " "maxCustomBorderColorSamplers limit of %d", phys_dev_ext_props.custom_border_color_props.maxCustomBorderColorSamplers); } } if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) { if ((VK_FALSE == enabled_features.portability_subset_features.samplerMipLodBias) && pCreateInfo->mipLodBias != 0) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-samplerMipLodBias-04467", "vkCreateSampler (portability error): mip LOD bias not supported."); } }
// If any of addressModeU, addressModeV or addressModeW are VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE, the
// VK_KHR_sampler_mirror_clamp_to_edge extension or promoted feature must be enabled
if ((device_extensions.vk_khr_sampler_mirror_clamp_to_edge != kEnabledByCreateinfo) && (enabled_features.core12.samplerMirrorClampToEdge == VK_FALSE)) {
// Use 'else if' because emitting three large error messages is redundant; a developer who set all three
// will notice the first report and fix them all at once
if (pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079", "vkCreateSampler(): addressModeU is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the " "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled."); } else if (pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079", "vkCreateSampler(): addressModeV is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the " "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled."); } else if (pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE) { skip |= LogError(device, "VUID-VkSamplerCreateInfo-addressModeU-01079",
"vkCreateSampler(): addressModeW is set to VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE but the " "VK_KHR_sampler_mirror_clamp_to_edge extension or samplerMirrorClampToEdge feature has not been enabled."); } } return skip; } bool CoreChecks::ValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress && !enabled_features.buffer_device_address_ext.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-bufferDeviceAddress-03324", "%s: The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice && !enabled_features.buffer_device_address_ext.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferDeviceAddress-device-03325", "%s: If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const auto buffer_state = GetBufferState(pInfo->buffer); if (buffer_state) { if (!(buffer_state->createInfo.flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT)) { skip |= ValidateMemoryIsBoundToBuffer(buffer_state, apiName, "VUID-VkBufferDeviceAddressInfo-buffer-02600"); } skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, true, "VUID-VkBufferDeviceAddressInfo-buffer-02601", apiName, "VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT"); } return skip; } bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressEXT"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferDeviceAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferDeviceAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferDeviceAddress"); } bool CoreChecks::ValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-None-03326", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->buffer, "VUID-vkGetBufferOpaqueCaptureAddress-device-03327", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } return skip; } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddressKHR(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), "vkGetBufferOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetBufferOpaqueCaptureAddress(VkDevice device, const VkBufferDeviceAddressInfo *pInfo) const { return ValidateGetBufferOpaqueCaptureAddress(device, static_cast<const VkBufferDeviceAddressInfo *>(pInfo), 
"vkGetBufferOpaqueCaptureAddress"); } bool CoreChecks::ValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo, const char *apiName) const { bool skip = false; if (!enabled_features.core12.bufferDeviceAddress) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-None-03334", "%s(): The bufferDeviceAddress feature must: be enabled.", apiName); } if (physical_device_count > 1 && !enabled_features.core12.bufferDeviceAddressMultiDevice) { skip |= LogError(pInfo->memory, "VUID-vkGetDeviceMemoryOpaqueCaptureAddress-device-03335", "%s(): If device was created with multiple physical devices, then the " "bufferDeviceAddressMultiDevice feature must: be enabled.", apiName); } const DEVICE_MEMORY_STATE *mem_info = GetDevMemState(pInfo->memory); if (mem_info) { auto chained_flags_struct = LvlFindInChain<VkMemoryAllocateFlagsInfo>(mem_info->alloc_info.pNext); if (!chained_flags_struct || !(chained_flags_struct->flags & VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT)) { skip |= LogError(pInfo->memory, "VUID-VkDeviceMemoryOpaqueCaptureAddressInfo-memory-03336", "%s(): memory must have been allocated with VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT.", apiName); } } return skip; } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddressKHR(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddressKHR"); } bool CoreChecks::PreCallValidateGetDeviceMemoryOpaqueCaptureAddress(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo *pInfo) const { return ValidateGetDeviceMemoryOpaqueCaptureAddress(device, static_cast<const VkDeviceMemoryOpaqueCaptureAddressInfo *>(pInfo), "vkGetDeviceMemoryOpaqueCaptureAddress"); } bool CoreChecks::ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery, uint32_t queryCount, const char *vuid_badfirst, const char *vuid_badrange, const char *apiName) const { bool skip = false; if (firstQuery >= totalCount) { skip |= LogError(device, vuid_badfirst, "%s(): firstQuery (%" PRIu32 ") greater than or equal to query pool count (%" PRIu32 ") for %s", apiName, firstQuery, totalCount, report_data->FormatHandle(queryPool).c_str()); } if ((firstQuery + queryCount) > totalCount) { skip |= LogError(device, vuid_badrange, "%s(): Query range [%" PRIu32 ", %" PRIu32 ") goes beyond query pool count (%" PRIu32 ") for %s", apiName, firstQuery, firstQuery + queryCount, totalCount, report_data->FormatHandle(queryPool).c_str()); } return skip; } bool CoreChecks::ValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, const char *apiName) const { if (disabled[query_validation]) return false; bool skip = false; if (!enabled_features.core12.hostQueryReset) { skip |= LogError(device, "VUID-vkResetQueryPool-None-02665", "%s(): Host query reset not enabled for device", apiName); } const auto query_pool_state = GetQueryPoolState(queryPool); if (query_pool_state) { skip |= ValidateQueryRange(device, queryPool, query_pool_state->createInfo.queryCount, firstQuery, queryCount, "VUID-vkResetQueryPool-firstQuery-02666", "VUID-vkResetQueryPool-firstQuery-02667", apiName); } return skip; } bool CoreChecks::PreCallValidateResetQueryPoolEXT(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return 
bool CoreChecks::PreCallValidateResetQueryPool(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) const { return ValidateResetQueryPool(device, queryPool, firstQuery, queryCount, "vkResetQueryPool"); } VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkValidationCacheEXT *pValidationCache) { *pValidationCache = ValidationCache::Create(pCreateInfo); return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED; } void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks *pAllocator) { delete CastFromHandle<ValidationCache *>(validationCache); } VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize, void *pData) { size_t in_size = *pDataSize; CastFromHandle<ValidationCache *>(validationCache)->Write(pDataSize, pData); return (pData && *pDataSize != in_size) ? VK_INCOMPLETE : VK_SUCCESS; } VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT *pSrcCaches) { bool skip = false; auto dst = CastFromHandle<ValidationCache *>(dstCache); VkResult result = VK_SUCCESS; for (uint32_t i = 0; i < srcCacheCount; i++) { auto src = CastFromHandle<const ValidationCache *>(pSrcCaches[i]); if (src == dst) { skip |= LogError(device, "VUID-vkMergeValidationCachesEXT-dstCache-01536", "vkMergeValidationCachesEXT: dstCache (0x%" PRIx64 ") must not appear in pSrcCaches array.", HandleToUint64(dstCache)); result = VK_ERROR_VALIDATION_FAILED_EXT; } if (!skip) { dst->Merge(src); } } return result; } bool CoreChecks::ValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask, const char *func_name) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); skip |= ValidateCmd(cb_state, CMD_SETDEVICEMASK, func_name); skip |= ValidateDeviceMaskToPhysicalDeviceCount(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00108"); skip |= ValidateDeviceMaskToZero(deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00109"); skip |= ValidateDeviceMaskToCommandBuffer(cb_state, deviceMask, commandBuffer, "VUID-vkCmdSetDeviceMask-deviceMask-00110"); if (cb_state->activeRenderPass) { skip |= ValidateDeviceMaskToRenderPass(cb_state, deviceMask, "VUID-vkCmdSetDeviceMask-deviceMask-00111"); } return skip; } bool CoreChecks::PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask) const { return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMask()"); } bool CoreChecks::PreCallValidateCmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask) const { return ValidateCmdSetDeviceMask(commandBuffer, deviceMask, "vkCmdSetDeviceMaskKHR()"); } bool CoreChecks::ValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue, const char *apiName) const { bool skip = false; const auto *semaphore_state = GetSemaphoreState(semaphore); if (semaphore_state && semaphore_state->type != VK_SEMAPHORE_TYPE_TIMELINE) { skip |= LogError(semaphore, "VUID-vkGetSemaphoreCounterValue-semaphore-03255", "%s(): semaphore %s must be of VK_SEMAPHORE_TYPE_TIMELINE type", apiName, report_data->FormatHandle(semaphore).c_str()); }
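// Binary semaphores carry no counter payload; only VK_SEMAPHORE_TYPE_TIMELINE semaphores can be queried here.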
return skip; } bool CoreChecks::PreCallValidateGetSemaphoreCounterValueKHR(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const { return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValueKHR"); } bool CoreChecks::PreCallValidateGetSemaphoreCounterValue(VkDevice device, VkSemaphore semaphore, uint64_t *pValue) const { return ValidateGetSemaphoreCounterValue(device, semaphore, pValue, "vkGetSemaphoreCounterValue"); } bool CoreChecks::ValidateQueryPoolStride(const std::string &vuid_not_64, const std::string &vuid_64, const VkDeviceSize stride, const char *parameter_name, const uint64_t parameter_value, const VkQueryResultFlags flags) const { bool skip = false; if (flags & VK_QUERY_RESULT_64_BIT) { static const int condition_multiples = 0b0111; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } else { static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (parameter_value & condition_multiples)) { skip |= LogError(device, vuid_not_64, "stride %" PRIx64 " or %s %" PRIx64 " is invalid.", stride, parameter_name, parameter_value); } } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithStruct(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size) const { bool skip = false; static const int condition_multiples = 0b0011; if ((stride & condition_multiples) || (stride < struct_size)) { skip |= LogError(commandBuffer, vuid, "stride %d is invalid or less than sizeof(%s) %d.", stride, struct_name, struct_size); } return skip; } bool CoreChecks::ValidateCmdDrawStrideWithBuffer(VkCommandBuffer commandBuffer, const std::string &vuid, const uint32_t stride, const char *struct_name, const uint32_t struct_size, const uint32_t drawCount, const VkDeviceSize offset, const BUFFER_STATE *buffer_state) const { bool skip = false;
// Widen before multiplying so a large stride * drawCount cannot wrap around in 32-bit arithmetic.
uint64_t validation_value = static_cast<uint64_t>(stride) * (drawCount - 1) + offset + struct_size; if (validation_value > buffer_state->createInfo.size) { skip |= LogError(commandBuffer, vuid, "stride[%d] * (drawCount[%d] - 1) + offset[%" PRIx64 "] + sizeof(%s)[%d] = %" PRIx64 " is greater than the size[%" PRIx64 "] of %s.", stride, drawCount, offset, struct_name, struct_size, validation_value, buffer_state->createInfo.size, report_data->FormatHandle(buffer_state->buffer()).c_str()); } return skip; } bool CoreChecks::PreCallValidateReleaseProfilingLockKHR(VkDevice device) const { bool skip = false; if (!performance_lock_acquired) { skip |= LogError(device, "VUID-vkReleaseProfilingLockKHR-device-03235", "vkReleaseProfilingLockKHR(): The profiling lock of device must have been held via a previous successful " "call to vkAcquireProfilingLockKHR."); } return skip; } bool CoreChecks::PreCallValidateCmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void *pCheckpointMarker) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETCHECKPOINTNV, "vkCmdSetCheckpointNV()"); return skip; } bool CoreChecks::PreCallValidateWriteAccelerationStructuresPropertiesKHR(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, size_t dataSize, void *pData, size_t stride) const { bool skip = false; for (uint32_t i = 0; i < accelerationStructureCount; ++i) {
const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]); const auto &as_info = as_state->build_info_khr; if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) { if (!(as_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-vkWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431", "vkWriteAccelerationStructuresPropertiesKHR: All acceleration structures (%s) in " "pAccelerationStructures must have been built with " "VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR.", report_data->FormatHandle(as_state->acceleration_structure()).c_str()); } } } return skip; } bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesKHR( VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESKHR, "vkCmdWriteAccelerationStructuresPropertiesKHR()"); const auto *query_pool_state = GetQueryPoolState(queryPool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType != queryType) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-queryPool-02493", "vkCmdWriteAccelerationStructuresPropertiesKHR: queryPool must have been created with a queryType matching queryType."); } for (uint32_t i = 0; i < accelerationStructureCount; ++i) { if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR) { const ACCELERATION_STRUCTURE_STATE_KHR *as_state = GetAccelerationStructureStateKHR(pAccelerationStructures[i]); if (!(as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesKHR-accelerationStructures-03431", "vkCmdWriteAccelerationStructuresPropertiesKHR: All acceleration structures in pAccelerationStructures " "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR."); } } } return skip; } bool CoreChecks::PreCallValidateCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV *pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); skip |= ValidateCmd(cb_state, CMD_WRITEACCELERATIONSTRUCTURESPROPERTIESNV, "vkCmdWriteAccelerationStructuresPropertiesNV()"); const auto *query_pool_state = GetQueryPoolState(queryPool); const auto &query_pool_ci = query_pool_state->createInfo; if (query_pool_ci.queryType != queryType) { skip |= LogError( device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-queryPool-03755", "vkCmdWriteAccelerationStructuresPropertiesNV: queryPool must have been created with a queryType matching queryType."); } for (uint32_t i = 0; i < accelerationStructureCount; ++i) { if (queryType == VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV) { const ACCELERATION_STRUCTURE_STATE *as_state = GetAccelerationStructureStateNV(pAccelerationStructures[i]); if (!(as_state->build_info.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) {
skip |= LogError(device, "VUID-vkCmdWriteAccelerationStructuresPropertiesNV-accelerationStructures-03431", "vkCmdWriteAccelerationStructuresPropertiesNV: All acceleration structures in pAccelerationStructures " "must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR if queryType is " "VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV."); } } } return skip; } uint32_t CoreChecks::CalcTotalShaderGroupCount(const PIPELINE_STATE *pipelineState) const { uint32_t total = pipelineState->raytracingPipelineCI.groupCount; if (pipelineState->raytracingPipelineCI.pLibraryInfo) { for (uint32_t i = 0; i < pipelineState->raytracingPipelineCI.pLibraryInfo->libraryCount; ++i) { const PIPELINE_STATE *library_pipeline_state = GetPipelineState(pipelineState->raytracingPipelineCI.pLibraryInfo->pLibraries[i]); total += CalcTotalShaderGroupCount(library_pipeline_state); } } return total; } bool CoreChecks::PreCallValidateGetRayTracingShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void *pData) const { bool skip = false; const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); if (pipeline_state->getPipelineCreateFlags() & VK_PIPELINE_CREATE_LIBRARY_BIT_KHR) { skip |= LogError( device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-pipeline-03482", "vkGetRayTracingShaderGroupHandlesKHR: pipeline must not have been created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR."); } if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleSize * groupCount)) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-dataSize-02420", "vkGetRayTracingShaderGroupHandlesKHR: dataSize (%zu) must be at least " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleSize * groupCount.", dataSize); } uint32_t total_group_count = CalcTotalShaderGroupCount(pipeline_state); if (firstGroup >= total_group_count) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-04050", "vkGetRayTracingShaderGroupHandlesKHR: firstGroup must be less than the number of shader groups in pipeline."); } if ((firstGroup + groupCount) > total_group_count) { skip |= LogError( device, "VUID-vkGetRayTracingShaderGroupHandlesKHR-firstGroup-02419", "vkGetRayTracingShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less than or equal to the number " "of shader groups in pipeline."); } return skip; } bool CoreChecks::PreCallValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void *pData) const { bool skip = false; if (dataSize < (phys_dev_ext_props.ray_tracing_propsKHR.shaderGroupHandleCaptureReplaySize * groupCount)) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: dataSize (%zu) must be at least " "VkPhysicalDeviceRayTracingPipelinePropertiesKHR::shaderGroupHandleCaptureReplaySize * groupCount.", dataSize); } const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); if (!pipeline_state) { return skip; } if (firstGroup >= pipeline_state->raytracingPipelineCI.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: firstGroup must be less than the number of shader " "groups in pipeline."); }
pipeline."); } if ((firstGroup + groupCount) > pipeline_state->raytracingPipelineCI.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483", "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR: The sum of firstGroup and groupCount must be less " "than or equal to the number of shader groups in pipeline."); } if (!(pipeline_state->raytracingPipelineCI.flags & VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR)) { skip |= LogError(device, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-pipeline-03607", "pipeline must have been created with a flags that included " "VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR."); } return skip; } bool CoreChecks::PreCallValidateCmdBuildAccelerationStructuresIndirectKHR(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR *pInfos, const VkDeviceAddress *pIndirectDeviceAddresses, const uint32_t *pIndirectStrides, const uint32_t *const *ppMaxPrimitiveCounts) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BUILDACCELERATIONSTRUCTURESINDIRECTKHR, "vkCmdBuildAccelerationStructuresIndirectKHR()"); for (uint32_t i = 0; i < infoCount; ++i) { const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfos[i].srcAccelerationStructure); const ACCELERATION_STRUCTURE_STATE_KHR *dst_as_state = GetAccelerationStructureStateKHR(pInfos[i].dstAccelerationStructure); if (pInfos[i].mode == VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR) { if (src_as_state == nullptr || !src_as_state->built || !(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03667", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its srcAccelerationStructure member must have " "been built before with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR set in " "VkAccelerationStructureBuildGeometryInfoKHR::flags."); } if (pInfos[i].geometryCount != src_as_state->build_info_khr.geometryCount) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03758", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is " "VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR," " its geometryCount member must have the same value which was specified when " "srcAccelerationStructure was last built."); } if (pInfos[i].flags != src_as_state->build_info_khr.flags) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03759", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its flags member must have the same value which" " was specified when srcAccelerationStructure was last built."); } if (pInfos[i].type != src_as_state->build_info_khr.type) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03760", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its mode member is" " VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR, its type member must have the same value which" " was specified when srcAccelerationStructure was last built."); } } if (pInfos[i].type == 
if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03700", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR, its dstAccelerationStructure member must have " "been created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } if (pInfos[i].type == VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR) { if (!dst_as_state || (dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR && dst_as_state->create_infoKHR.type != VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR)) { skip |= LogError(device, "VUID-vkCmdBuildAccelerationStructuresIndirectKHR-pInfos-03699", "vkCmdBuildAccelerationStructuresIndirectKHR(): For each element of pInfos, if its type member is " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR, its dstAccelerationStructure member must have been " "created with a value of VkAccelerationStructureCreateInfoKHR::type equal to either " "VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR or VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR."); } } } return skip; } bool CoreChecks::ValidateCopyAccelerationStructureInfoKHR(const VkCopyAccelerationStructureInfoKHR *pInfo, const char *api_name) const { bool skip = false; if (pInfo->mode == VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR) { const ACCELERATION_STRUCTURE_STATE_KHR *src_as_state = GetAccelerationStructureStateKHR(pInfo->src); if (!(src_as_state->build_info_khr.flags & VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR)) { skip |= LogError(device, "VUID-VkCopyAccelerationStructureInfoKHR-src-03411", "(%s): src must have been built with VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR " "if mode is VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR.", api_name); } } return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureKHR(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR *pInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTUREKHR, "vkCmdCopyAccelerationStructureKHR()"); skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCmdCopyAccelerationStructureKHR");
// Return the accumulated result; returning false here would silently discard any errors found above.
return skip; } bool CoreChecks::PreCallValidateCopyAccelerationStructureKHR(VkDevice device, VkDeferredOperationKHR deferredOperation, const VkCopyAccelerationStructureInfoKHR *pInfo) const { bool skip = false; skip |= ValidateCopyAccelerationStructureInfoKHR(pInfo, "vkCopyAccelerationStructureKHR"); return skip; } bool CoreChecks::PreCallValidateCmdCopyAccelerationStructureToMemoryKHR( VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR *pInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_COPYACCELERATIONSTRUCTURETOMEMORYKHR, "vkCmdCopyAccelerationStructureToMemoryKHR()"); const auto *accel_state = GetAccelerationStructureStateKHR(pInfo->src); if (accel_state) { const auto *buffer_state = GetBufferState(accel_state->create_infoKHR.buffer); skip |=
ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdCopyAccelerationStructureToMemoryKHR", "VUID-vkCmdCopyAccelerationStructureToMemoryKHR-None-03559"); } return skip; } bool CoreChecks::PreCallValidateCmdCopyMemoryToAccelerationStructureKHR( VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR *pInfo) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_COPYMEMORYTOACCELERATIONSTRUCTUREKHR, "vkCmdCopyMemoryToAccelerationStructureKHR()"); return skip; } bool CoreChecks::PreCallValidateCmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes) const { bool skip = false; char const *const cmd_name = "CmdBindTransformFeedbackBuffersEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-transformFeedback-02355", "%s: transformFeedback feature is not enabled.", cmd_name); } { auto const cb_state = GetCBState(commandBuffer); if (cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdBindTransformFeedbackBuffersEXT-None-02365", "%s: transform feedback is active.", cmd_name); } } for (uint32_t i = 0; i < bindingCount; ++i) { auto const buffer_state = GetBufferState(pBuffers[i]); assert(buffer_state != nullptr); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02358", "%s: pOffset[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than or equal to the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pOffsets[i], i, buffer_state->createInfo.size); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02360", "%s: pBuffers[%" PRIu32 "] (%s)" " was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pBuffers[i]).c_str()); } // pSizes is optional and may be nullptr. 
// Also might be VK_WHOLE_SIZE, in which case these VUs do not apply.
if ((pSizes != nullptr) && (pSizes[i] != VK_WHOLE_SIZE)) {
// Only report one error: if the size alone is already too large, the offset + size check would be redundant.
if (pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pSizes-02362", "%s: pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pSizes[i], i, buffer_state->createInfo.size); } else if (pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindTransformFeedbackBuffersEXT-pOffsets-02363", "%s: The sum of pOffsets[%" PRIu32 "](0x%" PRIxLEAST64 ") and pSizes[%" PRIu32 "](0x%" PRIxLEAST64 ") is greater than the size of pBuffers[%" PRIu32 "](0x%" PRIxLEAST64 ").", cmd_name, i, pOffsets[i], i, pSizes[i], i, buffer_state->createInfo.size); } } skip |= ValidateMemoryIsBoundToBuffer(buffer_state, cmd_name, "VUID-vkCmdBindTransformFeedbackBuffersEXT-pBuffers-02364"); } return skip; } bool CoreChecks::PreCallValidateCmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdBeginTransformFeedbackEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-transformFeedback-02366", "%s: transformFeedback feature is not enabled.", cmd_name); } { auto const cb_state = GetCBState(commandBuffer); if (cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-None-02367", "%s: transform feedback is active.", cmd_name); } } // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr // if pCounterBuffers is nullptr.
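// A non-null pCounterBufferOffsets requires pCounterBuffers; otherwise each provided counter buffer is checked below.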
if (pCounterBuffers == nullptr) { if (pCounterBufferOffsets != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffer-02371", "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name); } } else { for (uint32_t i = 0; i < counterBufferCount; ++i) { if (pCounterBuffers[i] != VK_NULL_HANDLE) { auto const buffer_state = GetBufferState(pCounterBuffers[i]); assert(buffer_state != nullptr); if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBufferOffsets-02370", "%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBeginTransformFeedbackEXT-pCounterBuffers-02372", "%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str()); } } } } return skip; } bool CoreChecks::PreCallValidateCmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer *pCounterBuffers, const VkDeviceSize *pCounterBufferOffsets) const { bool skip = false; char const *const cmd_name = "CmdEndTransformFeedbackEXT"; if (!enabled_features.transform_feedback_features.transformFeedback) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-transformFeedback-02374", "%s: transformFeedback feature is not enabled.", cmd_name); } { auto const cb_state = GetCBState(commandBuffer); if (!cb_state->transform_feedback_active) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-None-02375", "%s: transform feedback is not active.", cmd_name); } } // pCounterBuffers and pCounterBufferOffsets are optional and may be nullptr. Additionally, pCounterBufferOffsets must be nullptr // if pCounterBuffers is nullptr.
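// The End variant applies the same counter buffer rules as vkCmdBeginTransformFeedbackEXT, with its own VUIDs.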
if (pCounterBuffers == nullptr) { if (pCounterBufferOffsets != nullptr) { skip |= LogError(commandBuffer, "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffer-02379", "%s: pCounterBuffers is NULL and pCounterBufferOffsets is not NULL.", cmd_name); } } else { for (uint32_t i = 0; i < counterBufferCount; ++i) { if (pCounterBuffers[i] != VK_NULL_HANDLE) { auto const buffer_state = GetBufferState(pCounterBuffers[i]); assert(buffer_state != nullptr); if (pCounterBufferOffsets != nullptr && pCounterBufferOffsets[i] + 4 > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBufferOffsets-02378", "%s: pCounterBuffers[%" PRIu32 "](%s) is not large enough to hold 4 bytes at pCounterBufferOffsets[%" PRIu32 "](0x%" PRIx64 ").", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str(), i, pCounterBufferOffsets[i]); } if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT) == 0) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdEndTransformFeedbackEXT-pCounterBuffers-02380", "%s: pCounterBuffers[%" PRIu32 "] (%s) was not created with the VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT flag.", cmd_name, i, report_data->FormatHandle(pCounterBuffers[i]).c_str()); } } } } return skip; } bool CoreChecks::PreCallValidateCmdSetLogicOpEXT(VkCommandBuffer commandBuffer, VkLogicOp logicOp) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETLOGICOPEXT, "vkCmdSetLogicOpEXT()"); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2LogicOp) { skip |= LogError(commandBuffer, "VUID-vkCmdSetLogicOpEXT-None-04867", "vkCmdSetLogicOpEXT: extendedDynamicState2LogicOp feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetPatchControlPointsEXT(VkCommandBuffer commandBuffer, uint32_t patchControlPoints) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETPATCHCONTROLPOINTSEXT, "vkCmdSetPatchControlPointsEXT()"); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2PatchControlPoints) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-None-04873", "vkCmdSetPatchControlPointsEXT: extendedDynamicState2PatchControlPoints feature is not enabled."); } if (patchControlPoints > phys_dev_props.limits.maxTessellationPatchSize) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPatchControlPointsEXT-patchControlPoints-04874", "vkCmdSetPatchControlPointsEXT: The value of patchControlPoints must be less than or equal to " "VkPhysicalDeviceLimits::maxTessellationPatchSize"); } return skip; } bool CoreChecks::PreCallValidateCmdSetRasterizerDiscardEnableEXT(VkCommandBuffer commandBuffer, VkBool32 rasterizerDiscardEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETRASTERIZERDISCARDENABLEEXT, "vkCmdSetRasterizerDiscardEnableEXT()"); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetRasterizerDiscardEnableEXT-None-04871", "vkCmdSetRasterizerDiscardEnableEXT: extendedDynamicState2 feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBiasEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthBiasEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETDEPTHBIASENABLEEXT, "vkCmdSetDepthBiasEnableEXT()");
CMD_SETDEPTHBIASENABLEEXT, "vkCmdSetDepthBiasEnableEXT()"); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBiasEnableEXT-None-04872", "vkCmdSetDepthBiasEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetPrimitiveRestartEnableEXT(VkCommandBuffer commandBuffer, VkBool32 primitiveRestartEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVERESTARTENABLEEXT, "vkCmdSetPrimitiveRestartEnableEXT()"); if (!enabled_features.extended_dynamic_state2_features.extendedDynamicState2) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveRestartEnableEXT-None-04866", "vkCmdSetPrimitiveRestartEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetCullModeEXT(VkCommandBuffer commandBuffer, VkCullModeFlags cullMode) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETCULLMODEEXT, "vkCmdSetCullModeEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetCullModeEXT-None-03384", "vkCmdSetCullModeEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetFrontFaceEXT(VkCommandBuffer commandBuffer, VkFrontFace frontFace) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETFRONTFACEEXT, "vkCmdSetFrontFaceEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetFrontFaceEXT-None-03383", "vkCmdSetFrontFaceEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetPrimitiveTopologyEXT(VkCommandBuffer commandBuffer, VkPrimitiveTopology primitiveTopology) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETPRIMITIVETOPOLOGYEXT, "vkCmdSetPrimitiveTopologyEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetPrimitiveTopologyEXT-None-03347", "vkCmdSetPrimitiveTopologyEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetViewportWithCountEXT(VkCommandBuffer commandBuffer, uint32_t viewportCount, const VkViewport *pViewports) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETVIEWPORTWITHCOUNTEXT, "vkCmdSetViewportWithCountEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetViewportWithCountEXT-None-03393", "vkCmdSetViewportWithCountEXT: extendedDynamicState feature is not enabled."); } skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetViewportWithCountEXT-commandBuffer-04819", "vkCmdSetViewportWithCountEXT"); return skip; } bool CoreChecks::PreCallValidateCmdSetScissorWithCountEXT(VkCommandBuffer commandBuffer, uint32_t scissorCount, const VkRect2D *pScissors) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETSCISSORWITHCOUNTEXT, "vkCmdSetScissorWithCountEXT()"); if 
(!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetScissorWithCountEXT-None-03396", "vkCmdSetScissorWithCountEXT: extendedDynamicState feature is not enabled."); } skip |= ForbidInheritedViewportScissor(commandBuffer, cb_state, "VUID-vkCmdSetScissorWithCountEXT-commandBuffer-04820", "vkCmdSetScissorWithCountEXT"); return skip; } bool CoreChecks::PreCallValidateCmdBindVertexBuffers2EXT(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers, const VkDeviceSize *pOffsets, const VkDeviceSize *pSizes, const VkDeviceSize *pStrides) const { const auto cb_state = GetCBState(commandBuffer); assert(cb_state); bool skip = false; skip |= ValidateCmd(cb_state, CMD_BINDVERTEXBUFFERS2EXT, "vkCmdBindVertexBuffers2EXT()"); for (uint32_t i = 0; i < bindingCount; ++i) { const auto buffer_state = GetBufferState(pBuffers[i]); if (buffer_state) { skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, true, "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03359", "vkCmdBindVertexBuffers2EXT()", "VK_BUFFER_USAGE_VERTEX_BUFFER_BIT"); skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdBindVertexBuffers2EXT()", "VUID-vkCmdBindVertexBuffers2EXT-pBuffers-03360"); if (pOffsets[i] >= buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pOffsets-03357", "vkCmdBindVertexBuffers2EXT() offset (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pOffsets[i]); } if (pSizes && pOffsets[i] + pSizes[i] > buffer_state->createInfo.size) { skip |= LogError(buffer_state->buffer(), "VUID-vkCmdBindVertexBuffers2EXT-pSizes-03358", "vkCmdBindVertexBuffers2EXT() size (0x%" PRIxLEAST64 ") is beyond the end of the buffer.", pSizes[i]); } } } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETDEPTHTESTENABLEEXT, "vkCmdSetDepthTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthTestEnableEXT-None-03352", "vkCmdSetDepthTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthWriteEnableEXT(VkCommandBuffer commandBuffer, VkBool32 depthWriteEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETDEPTHWRITEENABLEEXT, "vkCmdSetDepthWriteEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthWriteEnableEXT-None-03354", "vkCmdSetDepthWriteEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthCompareOpEXT(VkCommandBuffer commandBuffer, VkCompareOp depthCompareOp) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETDEPTHCOMPAREOPEXT, "vkCmdSetDepthCompareOpEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthCompareOpEXT-None-03353", "vkCmdSetDepthCompareOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetDepthBoundsTestEnableEXT(VkCommandBuffer 
commandBuffer, VkBool32 depthBoundsTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETDEPTHBOUNDSTESTENABLEEXT, "vkCmdSetDepthBoundsTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetDepthBoundsTestEnableEXT-None-03349", "vkCmdSetDepthBoundsTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilTestEnableEXT(VkCommandBuffer commandBuffer, VkBool32 stencilTestEnable) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETSTENCILTESTENABLEEXT, "vkCmdSetStencilTestEnableEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilTestEnableEXT-None-03350", "vkCmdSetStencilTestEnableEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCmdSetStencilOpEXT(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETSTENCILOPEXT, "vkCmdSetStencilOpEXT()"); if (!enabled_features.extended_dynamic_state_features.extendedDynamicState) { skip |= LogError(commandBuffer, "VUID-vkCmdSetStencilOpEXT-None-03351", "vkCmdSetStencilOpEXT: extendedDynamicState feature is not enabled."); } return skip; } bool CoreChecks::PreCallValidateCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) const { bool skip = false; if (device_extensions.vk_khr_portability_subset != ExtEnabled::kNotEnabled) { if (VK_FALSE == enabled_features.portability_subset_features.events) { skip |= LogError(device, "VUID-vkCreateEvent-events-04468", "vkCreateEvent: events are not supported via VK_KHR_portability_subset"); } } return skip; } bool CoreChecks::PreCallValidateCmdSetRayTracingPipelineStackSizeKHR(VkCommandBuffer commandBuffer, uint32_t pipelineStackSize) const { bool skip = false; const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); skip |= ValidateCmd(cb_state, CMD_SETRAYTRACINGPIPELINESTACKSIZEKHR, "vkCmdSetRayTracingPipelineStackSizeKHR()"); return skip; } bool CoreChecks::PreCallValidateGetRayTracingShaderGroupStackSizeKHR(VkDevice device, VkPipeline pipeline, uint32_t group, VkShaderGroupShaderKHR groupShader) const { bool skip = false; const PIPELINE_STATE *pipeline_state = GetPipelineState(pipeline); if (group >= pipeline_state->raytracingPipelineCI.groupCount) { skip |= LogError(device, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-group-03608", "vkGetRayTracingShaderGroupStackSizeKHR: The value of group must be less than the number of shader groups " "in pipeline."); } return skip; } bool CoreChecks::PreCallValidateCmdSetFragmentShadingRateKHR(VkCommandBuffer commandBuffer, const VkExtent2D *pFragmentSize, const VkFragmentShadingRateCombinerOpKHR combinerOps[2]) const { const CMD_BUFFER_STATE *cb_state = GetCBState(commandBuffer); assert(cb_state); const char *cmd_name = "vkCmdSetFragmentShadingRateKHR()"; bool skip = false; skip |= ValidateCmd(cb_state, CMD_SETFRAGMENTSHADINGRATEKHR, cmd_name); if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate 
&& !enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && !enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) { skip |= LogError( cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04509", "vkCmdSetFragmentShadingRateKHR: Application called %s, but no fragment shading rate features have been enabled.", cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->width != 1) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04507", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment width of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->width, cmd_name); } if (!enabled_features.fragment_shading_rate_features.pipelineFragmentShadingRate && pFragmentSize->height != 1) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pipelineFragmentShadingRate-04508", "vkCmdSetFragmentShadingRateKHR: Pipeline fragment height of %u has been specified in %s, but " "pipelineFragmentShadingRate is not enabled", pFragmentSize->height, cmd_name); } if (!enabled_features.fragment_shading_rate_features.primitiveFragmentShadingRate && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-primitiveFragmentShadingRate-04510", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "primitiveFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-attachmentFragmentShadingRate-04511", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "attachmentFragmentShadingRate is not enabled", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[0] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: First combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps is " "not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[0]), cmd_name); } if (!phys_dev_ext_props.fragment_shading_rate_props.fragmentShadingRateNonTrivialCombinerOps && (combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR && combinerOps[1] != VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-fragmentSizeNonTrivialCombinerOps-04512", "vkCmdSetFragmentShadingRateKHR: Second combiner operation of %s has been specified in %s, but " "fragmentShadingRateNonTrivialCombinerOps " "is not supported", string_VkFragmentShadingRateCombinerOpKHR(combinerOps[1]), cmd_name); } if (pFragmentSize->width == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04513", "vkCmdSetFragmentShadingRateKHR: 
Fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height == 0) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04514", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width != 0 && !IsPowerOfTwo(pFragmentSize->width)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04515", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment width of %u has been specified in %s.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height != 0 && !IsPowerOfTwo(pFragmentSize->height)) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04516", "vkCmdSetFragmentShadingRateKHR: Non-power-of-two fragment height of %u has been specified in %s.", pFragmentSize->height, cmd_name); } if (pFragmentSize->width > 4) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04517", "vkCmdSetFragmentShadingRateKHR: Fragment width of %u specified in %s is too large.", pFragmentSize->width, cmd_name); } if (pFragmentSize->height > 4) { skip |= LogError(cb_state->commandBuffer(), "VUID-vkCmdSetFragmentShadingRateKHR-pFragmentSize-04518", "vkCmdSetFragmentShadingRateKHR: Fragment height of %u specified in %s is too large", pFragmentSize->height, cmd_name); } return skip; }
1
18,127
nit, can we use `VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR` here? (Granted, it isn't used elsewhere either; maybe worth fixing here or in a separate PR.)
KhronosGroup-Vulkan-ValidationLayers
cpp
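The review comment above asks for the named enumerant. A minimal sketch of the style being requested, independent of the record's diff (which is not reproduced in full here); BindRayTracingPipeline is an illustrative helper, not part of the validation layers:

#include <vulkan/vulkan.h>

// Sketch only: refer to the ray tracing bind point by its named enumerant,
// VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, rather than by a literal value.
void BindRayTracingPipeline(VkCommandBuffer command_buffer, VkPipeline pipeline) {
    vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, pipeline);
}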
@@ -23,6 +23,6 @@ class Api::V1::ExercisesController < ApiController
   end
 
   def exercise_parameters
-    params.require(:exercise).permit(:edit_url, :summary, :title, :url)
+    params.require(:exercise).permit(:edit_url, :summary, :name, :url)
   end
 end
1
class Api::V1::ExercisesController < ApiController
  before_action :doorkeeper_authorize!
  skip_before_filter :verify_authenticity_token

  def update
    if authenticated_via_client_credentials_token?
      exercise = Exercise.find_or_initialize_by(uuid: params[:id])

      if exercise.update_attributes(exercise_parameters)
        render json: exercise
      else
        render json: { errors: exercise.errors }, status: :unprocessable_entity
      end
    else
      head :unauthorized
    end
  end

  private

  def authenticated_via_client_credentials_token?
    doorkeeper_token.resource_owner_id.nil?
  end

  def exercise_parameters
    params.require(:exercise).permit(:edit_url, :summary, :title, :url)
  end
end
1
13,925
This will need to be updated in the upcase-exercises repo as well.
thoughtbot-upcase
rb
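Because the permitted key changes from :title to :name, a client that still posts :title will have that attribute silently dropped by strong parameters; that is the cross-repo update the reviewer flags. A hedged sketch of such a client call, where the host, token, and payload values are placeholders:

require "faraday"
require "json"

# Illustrative client-side update: send "name" rather than "title" once the
# API permits the new key. Endpoint shape mirrors the controller above.
conn = Faraday.new(url: "https://example.com")
conn.patch("/api/v1/exercises/some-uuid") do |request|
  request.headers["Content-Type"] = "application/json"
  request.headers["Authorization"] = "Bearer <token>"
  request.body = JSON.generate(
    exercise: { name: "New name", summary: "...", url: "...", edit_url: "..." }
  )
end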
@@ -21,7 +21,7 @@ class Trail < ActiveRecord::Base
   def steps_remaining_for(user)
     ExerciseWithProgressQuery.
       new(user: user, exercises: exercises).
-      count { |exercise| exercise.state != Status::REVIEWED }
+      count { |exercise| exercise.state != Status::COMPLETE }
   end
 
   def self.most_recent_published
1
class Trail < ActiveRecord::Base
  extend FriendlyId

  validates :name, :description, presence: true

  has_many :steps, -> { order "position ASC" }, dependent: :destroy
  has_many :exercises, through: :steps

  friendly_id :name, use: [:slugged, :finders]

  # Override setters so it preserves the ordering
  def exercise_ids=(new_exercise_ids)
    super

    new_exercise_ids = new_exercise_ids.reject(&:blank?).map(&:to_i)
    new_exercise_ids.each_with_index do |exercise_id, index|
      steps.where(exercise_id: exercise_id).update_all(position: index + 1)
    end
  end

  def steps_remaining_for(user)
    ExerciseWithProgressQuery.
      new(user: user, exercises: exercises).
      count { |exercise| exercise.state != Status::REVIEWED }
  end

  def self.most_recent_published
    order(created_at: :desc).where(published: true)
  end
end
1
12,461
Think it's worth extracting this to `Exercise#complete?`?
thoughtbot-upcase
rb
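A sketch of the extraction the reviewer proposes; complete? does not exist in the code above, so its name and placement on Exercise are assumptions:

# Hypothetical extraction: let the model answer the question so the Trail
# query reads as intent instead of comparing states inline.
class Exercise < ActiveRecord::Base
  def complete?
    state == Status::COMPLETE
  end
end

# Trail#steps_remaining_for would then become:
#   count { |exercise| !exercise.complete? }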
@@ -87,14 +87,6 @@ class Currency
         return $this->exchangeRate;
     }
 
-    /**
-     * @return string
-     */
-    public function getReversedExchangeRate()
-    {
-        return 1 / $this->exchangeRate;
-    }
-
     /**
      * @param string $exchangeRate
      */
1
<?php

namespace Shopsys\FrameworkBundle\Model\Pricing\Currency;

use Doctrine\ORM\Mapping as ORM;

/**
 * @ORM\Table(name="currencies")
 * @ORM\Entity
 */
class Currency
{
    const CODE_CZK = 'CZK';
    const CODE_EUR = 'EUR';

    const DEFAULT_EXCHANGE_RATE = 1;

    /**
     * @var int
     *
     * @ORM\Column(type="integer")
     * @ORM\Id
     * @ORM\GeneratedValue(strategy="IDENTITY")
     */
    protected $id;

    /**
     * @var string
     *
     * @ORM\Column(type="string", length=50)
     */
    protected $name;

    /**
     * @var string
     *
     * @ORM\Column(type="string", length=3)
     */
    protected $code;

    /**
     * @var string
     *
     * @ORM\Column(type="decimal", precision=20, scale=6)
     */
    protected $exchangeRate;

    /**
     * @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyData $currencyData
     */
    public function __construct(CurrencyData $currencyData)
    {
        $this->name = $currencyData->name;
        $this->code = $currencyData->code;
        $this->exchangeRate = $currencyData->exchangeRate;
    }

    /**
     * @return int
     */
    public function getId()
    {
        return $this->id;
    }

    /**
     * @return string
     */
    public function getName()
    {
        return $this->name;
    }

    /**
     * @return string
     */
    public function getCode()
    {
        return $this->code;
    }

    /**
     * @return string
     */
    public function getExchangeRate()
    {
        return $this->exchangeRate;
    }

    /**
     * @return string
     */
    public function getReversedExchangeRate()
    {
        return 1 / $this->exchangeRate;
    }

    /**
     * @param string $exchangeRate
     */
    public function setExchangeRate($exchangeRate)
    {
        $this->exchangeRate = $exchangeRate;
    }

    /**
     * @param \Shopsys\FrameworkBundle\Model\Pricing\Currency\CurrencyData $currencyData
     */
    public function edit(CurrencyData $currencyData)
    {
        $this->name = $currencyData->name;
        $this->code = $currencyData->code;
    }
}
1
12,682
This is still a potentially useful public method - should we remove such methods?
shopsys-shopsys
php
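If the getter is removed as the diff proposes, callers can still derive the inverse rate from the remaining public API; a small hedged sketch (the free function is hypothetical, not part of the framework):

<?php

// Illustrative call-site replacement for the removed getter: compute the
// inverse from getExchangeRate() instead.
function reversedExchangeRate(Currency $currency): float
{
    return 1 / (float)$currency->getExchangeRate();
}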
@@ -40,15 +40,9 @@ public abstract class ApiDefaultsConfig {
   /** The name of the license of the client library. */
   public abstract String licenseName();
 
-  protected abstract Map<TargetLanguage, ReleaseLevel> releaseLevel();
-
   /** The development status of the client library. Configured per language. */
-  public ReleaseLevel releaseLevel(TargetLanguage language) {
-    ReleaseLevel level = releaseLevel().get(language);
-    if (level == null) {
-      level = ReleaseLevel.UNSET_RELEASE_LEVEL;
-    }
-    return level;
+  public ReleaseLevel releaseLevel() {
+    return ReleaseLevel.ALPHA;
   }
 
   protected abstract Map<TargetLanguage, VersionBound> generatedNonGAPackageVersionBound();
1
/* Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.api.codegen.config;

import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.TargetLanguage;
import com.google.auto.value.AutoValue;
import com.google.common.io.Resources;
import java.io.IOException;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import org.yaml.snakeyaml.Yaml;

/** This class holds defaults which are mostly used for packaging files. */
@AutoValue
public abstract class ApiDefaultsConfig {
  /** The author of the client library. */
  public abstract String author();

  /** The email of the author of the client library. */
  public abstract String email();

  /** The homepage of the client library. */
  public abstract String homepage();

  /** The name of the license of the client library. */
  public abstract String licenseName();

  protected abstract Map<TargetLanguage, ReleaseLevel> releaseLevel();

  /** The development status of the client library. Configured per language. */
  public ReleaseLevel releaseLevel(TargetLanguage language) {
    ReleaseLevel level = releaseLevel().get(language);
    if (level == null) {
      level = ReleaseLevel.UNSET_RELEASE_LEVEL;
    }
    return level;
  }

  protected abstract Map<TargetLanguage, VersionBound> generatedNonGAPackageVersionBound();

  private static Builder newBuilder() {
    return new AutoValue_ApiDefaultsConfig.Builder();
  }

  @AutoValue.Builder
  protected abstract static class Builder {
    abstract Builder author(String val);

    abstract Builder email(String val);

    abstract Builder homepage(String val);

    abstract Builder licenseName(String val);

    abstract Builder releaseLevel(Map<TargetLanguage, ReleaseLevel> val);

    abstract Builder generatedNonGAPackageVersionBound(Map<TargetLanguage, VersionBound> val);

    abstract ApiDefaultsConfig build();
  }

  @SuppressWarnings("unchecked")
  private static ApiDefaultsConfig createFromString(String yamlContents) {
    Yaml yaml = new Yaml();
    Map<String, Object> configMap = (Map<String, Object>) yaml.load(yamlContents);
    Builder builder =
        newBuilder()
            .author((String) configMap.get("author"))
            .email((String) configMap.get("email"))
            .homepage((String) configMap.get("homepage"))
            .licenseName((String) configMap.get("license"))
            .releaseLevel(
                Configs.createReleaseLevelMap((Map<String, String>) configMap.get("release_level")))
            .generatedNonGAPackageVersionBound(
                Configs.createVersionMap(
                    (Map<String, Map<String, String>>) configMap.get("generated_package_version")));
    return builder.build();
  }

  public static ApiDefaultsConfig load() throws IOException {
    URL apiDefaultsUrl =
        ApiDefaultsConfig.class.getResource("/com/google/api/codegen/packaging/api_defaults.yaml");
    String contents = Resources.toString(apiDefaultsUrl, StandardCharsets.UTF_8);
    return createFromString(contents);
  }
}
1
25,626
I don't understand this change, what is happening here?
googleapis-gapic-generator
java
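For context on the reviewer's question, the diff's effect in isolation is to drop the per-language lookup (with UNSET_RELEASE_LEVEL as the fallback) in favor of a constant, so every target language now reports ALPHA. A hedged before/after sketch, not code from the repository:

import java.util.Map;

// Old behavior: resolved per language, defaulting when unconfigured.
// New behavior: the language argument is gone and the answer is fixed.
class ReleaseLevelSketch {
  static ReleaseLevel oldBehavior(Map<TargetLanguage, ReleaseLevel> configured, TargetLanguage language) {
    return configured.getOrDefault(language, ReleaseLevel.UNSET_RELEASE_LEVEL);
  }

  static ReleaseLevel newBehavior() {
    return ReleaseLevel.ALPHA;
  }
}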
@@ -944,7 +944,8 @@ class ExcelCellTextInfo(NVDAObjectTextInfo):
 
 	def _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True):
 		formatField=textInfos.FormatField()
-		if (self.obj.excelCellObject.Application.Version > "12.0"):
+		version=int(self.obj.excelCellObject.Application.Version.split('.')[0])
+		if version>12:
 			cellObj=self.obj.excelCellObject.DisplayFormat
 		else:
 			cellObj=self.obj.excelCellObject
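This patch exists because the old check compared version strings lexicographically, which misorders versions once major numbers reach two digits; a quick demonstration of the failure mode the numeric parse fixes:

# Excel 2000 reports Version "9.0", Excel 2007 reports "12.0".
# The old string comparison ranks 2000 above 2007:
assert "9.0" > "12.0"                   # lexicographic: "9" sorts after "1"

# The patched numeric comparison gets both cases right:
assert int("9.0".split(".")[0]) <= 12   # old Excel takes the fallback path
assert int("14.0".split(".")[0]) > 12   # Excel 2010 uses DisplayFormat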
1
#NVDAObjects/excel.py #A part of NonVisual Desktop Access (NVDA) #Copyright (C) 2006-2016 NV Access Limited, Dinesh Kaushal, Siddhartha Gupta #This file is covered by the GNU General Public License. #See the file COPYING for more details. from comtypes import COMError import comtypes.automation import wx import time import winsound import re import uuid import collections import oleacc import ui import speech from tableUtils import HeaderCellInfo, HeaderCellTracker import config import textInfos import colors import eventHandler import api from logHandler import log import gui import winUser from displayModel import DisplayModelTextInfo import controlTypes from . import Window from .. import NVDAObjectTextInfo import scriptHandler import browseMode import inputCore import ctypes xlNone=-4142 xlSimple=-4154 xlExtended=3 xlCenter=-4108 xlJustify=-4130 xlLeft=-4131 xlRight=-4152 xlDistributed=-4117 xlBottom=-4107 xlTop=-4160 xlDown=-4121 xlToLeft=-4159 xlToRight=-4161 xlUp=-4162 xlCellWidthUnitToPixels = 7.5919335705812574139976275207592 xlSheetVisible=-1 alignmentLabels={ xlCenter:"center", xlJustify:"justify", xlLeft:"left", xlRight:"right", xlDistributed:"distributed", xlBottom:"botom", xlTop:"top", 1:"default", } xlA1 = 1 xlRC = 2 xlUnderlineStyleNone=-4142 #Excel cell types xlCellTypeAllFormatConditions =-4172 # from enum XlCellType xlCellTypeAllValidation =-4174 # from enum XlCellType xlCellTypeBlanks =4 # from enum XlCellType xlCellTypeComments =-4144 # from enum XlCellType xlCellTypeConstants =2 # from enum XlCellType xlCellTypeFormulas =-4123 # from enum XlCellType xlCellTypeLastCell =11 # from enum XlCellType xlCellTypeSameFormatConditions=-4173 # from enum XlCellType xlCellTypeSameValidation =-4175 # from enum XlCellType xlCellTypeVisible =12 # from enum XlCellType #MsoShapeType Enumeration msoFormControl=8 msoTextBox=17 #XlFormControl Enumeration xlButtonControl=0 xlCheckBox=1 xlDropDown=2 xlEditBox=3 xlGroupBox=4 xlLabel=5 xlListBox=6 xlOptionButton=7 xlScrollBar=8 xlSpinner=9 #MsoTriState Enumeration msoTrue=-1 #True msoFalse=0 #False #CheckBox and RadioButton States checked=1 unchecked=-4146 mixed=2 #LogPixels LOGPIXELSX=88 LOGPIXELSY=90 #Excel Cell Patterns (from enum XlPattern) xlPatternAutomatic = -4105 xlPatternChecker = 9 xlPatternCrissCross = 16 xlPatternDown = -4121 xlPatternGray16 = 17 xlPatternGray25 = -4124 xlPatternGray50 = -4125 xlPatternGray75 = -4126 xlPatternGray8 = 18 xlPatternGrid = 15 xlPatternHorizontal = -4128 xlPatternLightDown = 13 xlPatternLightHorizontal = 11 xlPatternLightUp = 14 xlPatternLightVertical = 12 xlPatternNone = -4142 xlPatternSemiGray75 = 10 xlPatternSolid = 1 xlPatternUp = -4162 xlPatternVertical = -4166 xlPatternLinearGradient = 4000 xlPatternRectangularGradient = 4001 backgroundPatternLabels={ # See https://msdn.microsoft.com/en-us/library/microsoft.office.interop.excel.xlpattern.aspx # Translators: A type of background pattern in Microsoft Excel. # Excel controls the pattern. xlPatternAutomatic:_("automatic"), # Translators: A type of background pattern in Microsoft Excel. # Checkerboard xlPatternChecker:_("diagonal crosshatch"), # Translators: A type of background pattern in Microsoft Excel. # Criss-cross lines xlPatternCrissCross:_("thin diagonal crosshatch"), # Translators: A type of background pattern in Microsoft Excel. # Dark diagonal lines running from the upper left to the lower right xlPatternDown:_("reverse diagonal stripe"), # Translators: A type of background pattern in Microsoft Excel. 
# 12.5% gray # xgettext:no-python-format xlPatternGray16:_("12.5% gray"), # Translators: A type of background pattern in Microsoft Excel. # 25% gray # xgettext:no-python-format xlPatternGray25:_("25% gray"), # Translators: A type of background pattern in Microsoft Excel. # xgettext:no-python-format # 50% gray xlPatternGray50:_("50% gray"), # Translators: A type of background pattern in Microsoft Excel. # 75% gray # xgettext:no-python-format xlPatternGray75:_("75% gray"), # Translators: A type of background pattern in Microsoft Excel. # 6.25% gray # xgettext:no-python-format xlPatternGray8:_("6.25% gray"), # Translators: A type of background pattern in Microsoft Excel. # Grid xlPatternGrid:_("thin horizontal crosshatch"), # Translators: A type of background pattern in Microsoft Excel. # Dark horizontal lines xlPatternHorizontal:_("horizontal stripe"), # Translators: A type of background pattern in Microsoft Excel. # Light diagonal lines running from the upper left to the lower right xlPatternLightDown:_("thin reverse diagonal stripe"), # Translators: A type of background pattern in Microsoft Excel. # Light horizontal lines xlPatternLightHorizontal:_("thin horizontal stripe"), # Translators: A type of background pattern in Microsoft Excel. # Light diagonal lines running from the lower left to the upper right xlPatternLightUp:_("thin diagonal stripe"), # Translators: A type of background pattern in Microsoft Excel. # Light vertical bars xlPatternLightVertical:_("thin vertical stripe"), # Translators: A type of background pattern in Microsoft Excel. # No pattern xlPatternNone:_("none"), # Translators: A type of background pattern in Microsoft Excel. # 75% dark moire xlPatternSemiGray75:_("thick diagonal crosshatch"), # Translators: A type of background pattern in Microsoft Excel. # Solid color xlPatternSolid:_("solid"), # Translators: A type of background pattern in Microsoft Excel. # Dark diagonal lines running from the lower left to the upper right xlPatternUp:_("diagonal stripe"), # Translators: A type of background pattern in Microsoft Excel. # Dark vertical bars xlPatternVertical:_("vertical stripe"), # Translators: A type of background pattern in Microsoft Excel. xlPatternLinearGradient:_("linear gradient"), # Translators: A type of background pattern in Microsoft Excel. 
xlPatternRectangularGradient:_("rectangular gradient"), } from excelCellBorder import getCellBorderStyleDescription re_RC=re.compile(r'R(?:\[(\d+)\])?C(?:\[(\d+)\])?') re_absRC=re.compile(r'^R(\d+)C(\d+)(?::R(\d+)C(\d+))?$') class ExcelQuickNavItem(browseMode.QuickNavItem): def __init__( self , nodeType , document , itemObject , itemCollection ): self.excelItemObject = itemObject self.excelItemCollection = itemCollection super( ExcelQuickNavItem ,self).__init__( nodeType , document ) def activate(self): pass def isChild(self,parent): return False def report(self,readUnit=None): pass class ExcelChartQuickNavItem(ExcelQuickNavItem): def __init__( self , nodeType , document , chartObject , chartCollection ): self.chartIndex = chartObject.Index if chartObject.Chart.HasTitle: self.label = chartObject.Chart.ChartTitle.Text + " " + chartObject.TopLeftCell.address(False,False,1,False) + "-" + chartObject.BottomRightCell.address(False,False,1,False) else: self.label = chartObject.Name + " " + chartObject.TopLeftCell.address(False,False,1,False) + "-" + chartObject.BottomRightCell.address(False,False,1,False) super( ExcelChartQuickNavItem ,self).__init__( nodeType , document , chartObject , chartCollection ) def __lt__(self,other): return self.chartIndex < other.chartIndex def moveTo(self): try: self.excelItemObject.Activate() # After activate(), though the chart object is selected, # pressing arrow keys moves the object, rather than # let use go inside for sub-objects. Somehow # calling an COM function on a different object fixes that ! log.debugWarning( self.excelItemCollection.Count ) except(COMError): pass focus=api.getDesktopObject().objectWithFocus() if not focus or not isinstance(focus,ExcelBase): return # Charts are not yet automatically detected with objectFromFocus, so therefore use selection sel=focus._getSelection() if not sel: return eventHandler.queueEvent("gainFocus",sel) @property def isAfterSelection(self): activeCell = self.document.Application.ActiveCell #log.debugWarning("active row: {} active column: {} current row: {} current column: {}".format ( activeCell.row , activeCell.column , self.excelCommentObject.row , self.excelCommentObject.column ) ) if self.excelItemObject.TopLeftCell.row == activeCell.row: if self.excelItemObject.TopLeftCell.column > activeCell.column: return False elif self.excelItemObject.TopLeftCell.row > activeCell.row: return False return True class ExcelRangeBasedQuickNavItem(ExcelQuickNavItem): def __lt__(self,other): if self.excelItemObject.row == other.excelItemObject.row: return self.excelItemObject.column < other.excelItemObject.column else: return self.excelItemObject.row < other.excelItemObject.row def moveTo(self): self.excelItemObject.Activate() eventHandler.queueEvent("gainFocus",api.getDesktopObject().objectWithFocus()) @property def isAfterSelection(self): activeCell = self.document.Application.ActiveCell log.debugWarning("active row: {} active column: {} current row: {} current column: {}".format ( activeCell.row , activeCell.column , self.excelItemObject.row , self.excelItemObject.column ) ) if self.excelItemObject.row == activeCell.row: if self.excelItemObject.column > activeCell.column: return False elif self.excelItemObject.row > activeCell.row: return False return True class ExcelCommentQuickNavItem(ExcelRangeBasedQuickNavItem): def __init__( self , nodeType , document , commentObject , commentCollection ): self.comment=commentObject.comment self.label = commentObject.address(False,False,1,False) + " " + (self.comment.Text() if 
self.comment else "") super( ExcelCommentQuickNavItem , self).__init__( nodeType , document , commentObject , commentCollection ) class ExcelFormulaQuickNavItem(ExcelRangeBasedQuickNavItem): def __init__( self , nodeType , document , formulaObject , formulaCollection ): self.label = formulaObject.address(False,False,1,False) + " " + formulaObject.Formula super( ExcelFormulaQuickNavItem , self).__init__( nodeType , document , formulaObject , formulaCollection ) class ExcelQuicknavIterator(object): """ Allows iterating over an MS excel collection (e.g. Comments, Formulas or charts) emitting L{QuickNavItem} objects. """ def __init__(self, itemType , document , direction , includeCurrent): """ See L{QuickNavItemIterator} for itemType, document and direction definitions. @ param includeCurrent: if true then any item at the initial position will be also emitted rather than just further ones. """ self.document=document self.itemType=itemType self.direction=direction if direction else "next" self.includeCurrent=includeCurrent def collectionFromWorksheet(self,worksheetObject): """ Fetches a Microsoft Excel collection object from a Microsoft excel worksheet object. E.g. charts, comments, or formula. @param worksheetObject: a Microsoft excel worksheet object. @return: a Microsoft excel collection object. """ raise NotImplementedError def filter(self,item): """ Only allows certain items fom a collection to be emitted. E.g. a chart . @param item: an item from a Microsoft excel collection (e.g. chart object). @return True if this item should be allowd, false otherwise. @rtype: bool """ return True def iterate(self): """ returns a generator that emits L{QuickNavItem} objects for this collection. """ items=self.collectionFromWorksheet(self.document) if not items: return if self.direction=="previous": items=reversed(items) for collectionItem in items: item=self.quickNavItemClass(self.itemType,self.document,collectionItem , items ) if not self.filter(collectionItem): continue yield item class ChartExcelCollectionQuicknavIterator(ExcelQuicknavIterator): quickNavItemClass=ExcelChartQuickNavItem#: the QuickNavItem class that should be instanciated and emitted. def collectionFromWorksheet( self , worksheetObject ): return worksheetObject.ChartObjects() class CommentExcelCollectionQuicknavIterator(ExcelQuicknavIterator): quickNavItemClass=ExcelCommentQuickNavItem#: the QuickNavItem class that should be instanciated and emitted. def collectionFromWorksheet( self , worksheetObject ): try: return worksheetObject.cells.SpecialCells( xlCellTypeComments ) except(COMError): return None def filter(self,item): return item is not None and item.comment is not None class FormulaExcelCollectionQuicknavIterator(ExcelQuicknavIterator): quickNavItemClass=ExcelFormulaQuickNavItem#: the QuickNavItem class that should be instanciated and emitted. 
def collectionFromWorksheet( self , worksheetObject ): try: return worksheetObject.cells.SpecialCells( xlCellTypeFormulas ) except(COMError): return None class ExcelSheetQuickNavItem(ExcelQuickNavItem): def __init__( self , nodeType , document , sheetObject , sheetCollection ): self.label = sheetObject.Name self.sheetIndex = sheetObject.Index self.sheetObject = sheetObject super( ExcelSheetQuickNavItem , self).__init__( nodeType , document , sheetObject , sheetCollection ) def __lt__(self,other): return self.sheetIndex < other.sheetIndex def moveTo(self): self.sheetObject.Activate() eventHandler.queueEvent("gainFocus",api.getDesktopObject().objectWithFocus()) def rename(self,newName): if newName and newName!=self.label: self.sheetObject.Name=newName self.label=newName @property def isRenameAllowed(self): return True @property def isAfterSelection(self): activeSheet = self.document.Application.ActiveSheet if self.sheetObject.Index <= activeSheet.Index: return False else: return True class SheetsExcelCollectionQuicknavIterator(ExcelQuicknavIterator): """ Allows iterating over an MS excel Sheets collection emitting L{QuickNavItem} object. """ quickNavItemClass=ExcelSheetQuickNavItem#: the QuickNavItem class that should be instantiated and emitted. def collectionFromWorksheet( self , worksheetObject ): try: return worksheetObject.Application.ActiveWorkbook.sheets except(COMError): return None def filter(self,sheet): if sheet.Visible==xlSheetVisible: return True class ExcelBrowseModeTreeInterceptor(browseMode.BrowseModeTreeInterceptor): # This treeInterceptor starts in focus mode, thus escape should not switch back to browse mode disableAutoPassThrough=True def __init__(self,rootNVDAObject): super(ExcelBrowseModeTreeInterceptor,self).__init__(rootNVDAObject) self.passThrough=True browseMode.reportPassThrough.last=True def _get_currentNVDAObject(self): obj=api.getFocusObject() return obj if obj.treeInterceptor is self else None def _get_isAlive(self): if not winUser.isWindow(self.rootNVDAObject.windowHandle): return False try: return self.rootNVDAObject.excelWorksheetObject.name==self.rootNVDAObject.excelApplicationObject.activeSheet.name except (COMError,AttributeError,NameError): log.debugWarning("could not compare sheet names",exc_info=True) return False def navigationHelper(self,direction): excelWindowObject=self.rootNVDAObject.excelWindowObject cellPosition = excelWindowObject.activeCell try: if direction == "left": cellPosition = cellPosition.Offset(0,-1) elif direction == "right": cellPosition = cellPosition.Offset(0,1) elif direction == "up": cellPosition = cellPosition.Offset(-1,0) elif direction == "down": cellPosition = cellPosition.Offset(1,0) #Start-of-Column elif direction == "startcol": cellPosition = cellPosition.end(xlUp) #Start-of-Row elif direction == "startrow": cellPosition = cellPosition.end(xlToLeft) #End-of-Row elif direction == "endrow": cellPosition = cellPosition.end(xlToRight) #End-of-Column elif direction == "endcol": cellPosition = cellPosition.end(xlDown) else: return except COMError: pass try: isMerged=cellPosition.mergeCells except (COMError,NameError): isMerged=False if isMerged: cellPosition=cellPosition.MergeArea(1) obj=ExcelMergedCell(windowHandle=self.rootNVDAObject.windowHandle,excelWindowObject=excelWindowObject,excelCellObject=cellPosition) else: obj=ExcelCell(windowHandle=self.rootNVDAObject.windowHandle,excelWindowObject=excelWindowObject,excelCellObject=cellPosition) cellPosition.Select() cellPosition.Activate() 
eventHandler.executeEvent('gainFocus',obj) def script_moveLeft(self,gesture): self.navigationHelper("left") def script_moveRight(self,gesture): self.navigationHelper("right") def script_moveUp(self,gesture): self.navigationHelper("up") def script_moveDown(self,gesture): self.navigationHelper("down") def script_startOfColumn(self,gesture): self.navigationHelper("startcol") def script_startOfRow(self,gesture): self.navigationHelper("startrow") def script_endOfRow(self,gesture): self.navigationHelper("endrow") def script_endOfColumn(self,gesture): self.navigationHelper("endcol") def __contains__(self,obj): return winUser.isDescendantWindow(self.rootNVDAObject.windowHandle,obj.windowHandle) def _get_selection(self): return self.rootNVDAObject._getSelection() def _set_selection(self,info): super(ExcelBrowseModeTreeInterceptor,self)._set_selection(info) #review.handleCaretMove(info) def _get_ElementsListDialog(self): return ElementsListDialog def _iterNodesByType(self,nodeType,direction="next",pos=None): if nodeType=="chart": return ChartExcelCollectionQuicknavIterator( nodeType , self.rootNVDAObject.excelWorksheetObject , direction , None ).iterate() elif nodeType=="comment": return CommentExcelCollectionQuicknavIterator( nodeType , self.rootNVDAObject.excelWorksheetObject , direction , None ).iterate() elif nodeType=="formula": return FormulaExcelCollectionQuicknavIterator( nodeType , self.rootNVDAObject.excelWorksheetObject , direction , None ).iterate() elif nodeType=="sheet": return SheetsExcelCollectionQuicknavIterator( nodeType , self.rootNVDAObject.excelWorksheetObject , direction , None ).iterate() elif nodeType=="formField": return ExcelFormControlQuicknavIterator( nodeType , self.rootNVDAObject.excelWorksheetObject , direction , None,self ).iterate(pos) else: raise NotImplementedError def script_elementsList(self,gesture): super(ExcelBrowseModeTreeInterceptor,self).script_elementsList(gesture) # Translators: the description for the elements list command in Microsoft Excel. script_elementsList.__doc__ = _("Lists various types of elements in this spreadsheet") script_elementsList.ignoreTreeInterceptorPassThrough=True __gestures = { "kb:upArrow": "moveUp", "kb:downArrow":"moveDown", "kb:leftArrow":"moveLeft", "kb:rightArrow":"moveRight", "kb:control+upArrow":"startOfColumn", "kb:control+downArrow":"endOfColumn", "kb:control+leftArrow":"startOfRow", "kb:control+rightArrow":"endOfRow", } class ElementsListDialog(browseMode.ElementsListDialog): ELEMENT_TYPES=( # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("chart", _("&Charts")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("comment", _("C&omments")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("formula", _("Fo&rmulas")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. ("formField", _("&Form fields")), # Translators: The label of a radio button to select the type of element # in the browse mode Elements List dialog. 
("sheet", _("&Sheets")), ) class ExcelBase(Window): """A base that all Excel NVDAObjects inherit from, which contains some useful methods.""" @staticmethod def excelWindowObjectFromWindow(windowHandle): try: pDispatch=oleacc.AccessibleObjectFromWindow(windowHandle,winUser.OBJID_NATIVEOM,interface=comtypes.automation.IDispatch) except (COMError,WindowsError): return None return comtypes.client.dynamic.Dispatch(pDispatch) @staticmethod def getCellAddress(cell, external=False,format=xlA1): text=cell.Address(False, False, format, external) textList=text.split(':') if len(textList)==2: # Translators: Used to express an address range in excel. text=_("{start} through {end}").format(start=textList[0], end=textList[1]) return text def _getDropdown(self): w=winUser.getAncestor(self.windowHandle,winUser.GA_ROOT) if not w: log.debugWarning("Could not get ancestor window (GA_ROOT)") return obj=Window(windowHandle=w,chooseBestAPI=False) if not obj: log.debugWarning("Could not instnaciate NVDAObject for ancestor window") return threadID=obj.windowThreadID while not eventHandler.isPendingEvents("gainFocus"): obj=obj.previous if not obj or not isinstance(obj,Window) or obj.windowThreadID!=threadID: log.debugWarning("Could not locate dropdown list in previous objects") return if obj.windowClassName=='EXCEL:': break return obj def _getSelection(self): selection=self.excelWindowObject.Selection try: isMerged=selection.mergeCells except (COMError,NameError): isMerged=False try: numCells=selection.count except (COMError,NameError): numCells=0 isChartActive = True if self.excelWindowObject.ActiveChart else False obj=None if isMerged: obj=ExcelMergedCell(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelCellObject=selection.item(1)) elif numCells>1: obj=ExcelSelection(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelRangeObject=selection) elif numCells==1: obj=ExcelCell(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelCellObject=selection) elif isChartActive: selection = self.excelWindowObject.ActiveChart import excelChart obj=excelChart.ExcelChart(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelChartObject=selection) return obj class Excel7Window(ExcelBase): """An overlay class for Window for the EXCEL7 window class, which simply bounces focus to the active excel cell.""" def _get_excelWindowObject(self): return self.excelWindowObjectFromWindow(self.windowHandle) def event_gainFocus(self): selection=self._getSelection() dropdown=self._getDropdown() if dropdown: if selection: dropdown.parent=selection eventHandler.executeEvent('gainFocus',dropdown) return if selection: eventHandler.executeEvent('gainFocus',selection) class ExcelWorksheet(ExcelBase): treeInterceptorClass=ExcelBrowseModeTreeInterceptor role=controlTypes.ROLE_TABLE def _get_excelApplicationObject(self): self.excelApplicationObject=self.excelWorksheetObject.application return self.excelApplicationObject re_definedName=re.compile( # Starts with an optional sheet name followed by an exclamation mark (!). # If a sheet name contains spaces then it is surrounded by single quotes (') # Examples: # Sheet1! # ''Sheet2 (4)'! # 'profit and loss'! u'^((?P<sheet>(\'[^\']+\'|[^!]+))!)?' # followed by a unique name (not containing spaces). Example: # rowtitle_ab12-cd34-de45 u'(?P<name>\w+)' # Optionally followed by minimum and maximum addresses, starting with a period (.). 
Example: # .a1.c3 # .ab34 u'(\.(?P<minAddress>[a-zA-Z]+[0-9]+)?(\.(?P<maxAddress>[a-zA-Z]+[0-9]+)?' # Optionally followed by a period (.) and extra random data (sometimes produced by other screen readers) u'(\..*)*)?)?$' ) def populateHeaderCellTrackerFromNames(self,headerCellTracker): sheetName=self.excelWorksheetObject.name for x in self.excelWorksheetObject.parent.names: fullName=x.name nameMatch=self.re_definedName.match(fullName) if not nameMatch: continue sheet=nameMatch.group('sheet') if sheet and sheet[0]=="'" and sheet[-1]=="'": sheet=sheet[1:-1] if sheet and sheet!=sheetName: continue name=nameMatch.group('name').lower() isColumnHeader=isRowHeader=False if name.startswith('title'): isColumnHeader=isRowHeader=True elif name.startswith('columntitle'): isColumnHeader=True elif name.startswith('rowtitle'): isRowHeader=True else: continue try: headerCell=x.refersToRange except COMError: continue if headerCell.parent.name!=sheetName: continue minColumnNumber=maxColumnNumber=minRowNumber=maxRowNumber=None minAddress=nameMatch.group('minAddress') if minAddress: try: minCell=self.excelWorksheetObject.range(minAddress) except COMError: minCell=None if minCell: minRowNumber=minCell.row minColumnNumber=minCell.column maxAddress=nameMatch.group('maxAddress') if maxAddress: try: maxCell=self.excelWorksheetObject.range(maxAddress) except COMError: maxCell=None if maxCell: maxRowNumber=maxCell.row maxColumnNumber=maxCell.column if maxColumnNumber is None: maxColumnNumber=self._getMaxColumnNumberForHeaderCell(headerCell) headerCellTracker.addHeaderCellInfo(rowNumber=headerCell.row,columnNumber=headerCell.column,rowSpan=headerCell.rows.count,colSpan=headerCell.columns.count,minRowNumber=minRowNumber,maxRowNumber=maxRowNumber,minColumnNumber=minColumnNumber,maxColumnNumber=maxColumnNumber,name=fullName,isColumnHeader=isColumnHeader,isRowHeader=isRowHeader) def _get_headerCellTracker(self): self.headerCellTracker=HeaderCellTracker() self.populateHeaderCellTrackerFromNames(self.headerCellTracker) return self.headerCellTracker def setAsHeaderCell(self,cell,isColumnHeader=False,isRowHeader=False): oldInfo=self.headerCellTracker.getHeaderCellInfoAt(cell.rowNumber,cell.columnNumber) if oldInfo: if isColumnHeader and not oldInfo.isColumnHeader: oldInfo.isColumnHeader=True oldInfo.rowSpan=cell.rowSpan elif isRowHeader and not oldInfo.isRowHeader: oldInfo.isRowHeader=True oldInfo.colSpan=cell.colSpan else: return False isColumnHeader=oldInfo.isColumnHeader isRowHeader=oldInfo.isRowHeader if isColumnHeader and isRowHeader: name="Title_" elif isRowHeader: name="RowTitle_" elif isColumnHeader: name="ColumnTitle_" else: raise ValueError("One or both of isColumnHeader or isRowHeader must be True") name+=uuid.uuid4().hex relativeName=name name="%s!%s"%(cell.excelRangeObject.worksheet.name,name) if oldInfo: self.excelWorksheetObject.parent.names(oldInfo.name).delete() oldInfo.name=name else: maxColumnNumber=self._getMaxColumnNumberForHeaderCell(cell.excelCellObject) self.headerCellTracker.addHeaderCellInfo(rowNumber=cell.rowNumber,columnNumber=cell.columnNumber,rowSpan=cell.rowSpan,colSpan=cell.colSpan,maxColumnNumber=maxColumnNumber,name=name,isColumnHeader=isColumnHeader,isRowHeader=isRowHeader) self.excelWorksheetObject.names.add(relativeName,cell.excelRangeObject) return True def _getMaxColumnNumberForHeaderCell(self,excelCell): try: r=excelCell.currentRegion except COMError: return excelCell.column columns=r.columns return columns[columns.count].column+1 def 
forgetHeaderCell(self,cell,isColumnHeader=False,isRowHeader=False): if not isColumnHeader and not isRowHeader: return False info=self.headerCellTracker.getHeaderCellInfoAt(cell.rowNumber,cell.columnNumber) if not info: return False if isColumnHeader and info.isColumnHeader: info.isColumnHeader=False elif isRowHeader and info.isRowHeader: info.isRowHeader=False else: return False self.headerCellTracker.removeHeaderCellInfo(info) self.excelWorksheetObject.parent.names(info.name).delete() if info.isColumnHeader or info.isRowHeader: self.setAsHeaderCell(cell,isColumnHeader=info.isColumnHeader,isRowHeader=info.isRowHeader) return True def fetchAssociatedHeaderCellText(self,cell,columnHeader=False): # #4409: cell.currentRegion fails if the worksheet is protected. try: cellRegion=cell.excelCellObject.currentRegion except COMError: log.debugWarning("Possibly protected sheet") return None for info in self.headerCellTracker.iterPossibleHeaderCellInfosFor(cell.rowNumber,cell.columnNumber,columnHeader=columnHeader): textList=[] if columnHeader: for headerRowNumber in xrange(info.rowNumber,info.rowNumber+info.rowSpan): headerCell=self.excelWorksheetObject.cells(headerRowNumber,cell.columnNumber) # The header could be merged cells. # if so, fetch text from the first in the merge as that always contains the content try: headerCell=headerCell.mergeArea.item(1) except (COMError,NameError,AttributeError): pass textList.append(headerCell.text) else: for headerColumnNumber in xrange(info.columnNumber,info.columnNumber+info.colSpan): headerCell=self.excelWorksheetObject.cells(cell.rowNumber,headerColumnNumber) # The header could be merged cells. # if so, fetch text from the first in the merge as that always contains the content try: headerCell=headerCell.mergeArea.item(1) except (COMError,NameError,AttributeError): pass textList.append(headerCell.text) text=" ".join(textList) if text: return text def __init__(self,windowHandle=None,excelWindowObject=None,excelWorksheetObject=None): self.excelWindowObject=excelWindowObject self.excelWorksheetObject=excelWorksheetObject super(ExcelWorksheet,self).__init__(windowHandle=windowHandle) for gesture in self.__changeSelectionGestures: self.bindGesture(gesture, "changeSelection") def _get_name(self): return self.excelWorksheetObject.name def _isEqual(self, other): if not super(ExcelWorksheet, self)._isEqual(other): return False return self.excelWorksheetObject.index == other.excelWorksheetObject.index def _get_firstChild(self): cell=self.excelWorksheetObject.cells(1,1) return ExcelCell(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelCellObject=cell) def _get_states(self): states=super(ExcelWorksheet,self).states if self.excelWorksheetObject.ProtectContents: states.add(controlTypes.STATE_PROTECTED) return states def script_changeSelection(self,gesture): oldSelection=api.getFocusObject() gesture.send() import eventHandler import time newSelection=None curTime=startTime=time.time() while (curTime-startTime)<=0.15: if scriptHandler.isScriptWaiting(): # Prevent lag if keys are pressed rapidly return if eventHandler.isPendingEvents('gainFocus'): return newSelection=self._getSelection() if newSelection and newSelection!=oldSelection: break api.processPendingEvents(processEventQueue=False) time.sleep(0.015) curTime=time.time() if newSelection: if oldSelection.parent==newSelection.parent: newSelection.parent=oldSelection.parent eventHandler.executeEvent('gainFocus',newSelection) script_changeSelection.canPropagate=True __changeSelectionGestures = ( 
"kb:tab", "kb:shift+tab", "kb:enter", "kb:numpadEnter", "kb:upArrow", "kb:downArrow", "kb:leftArrow", "kb:rightArrow", "kb:control+upArrow", "kb:control+downArrow", "kb:control+leftArrow", "kb:control+rightArrow", "kb:home", "kb:end", "kb:control+home", "kb:control+end", "kb:shift+upArrow", "kb:shift+downArrow", "kb:shift+leftArrow", "kb:shift+rightArrow", "kb:shift+control+upArrow", "kb:shift+control+downArrow", "kb:shift+control+leftArrow", "kb:shift+control+rightArrow", "kb:shift+home", "kb:shift+end", "kb:shift+control+home", "kb:shift+control+end", "kb:shift+space", "kb:control+space", "kb:pageUp", "kb:pageDown", "kb:shift+pageUp", "kb:shift+pageDown", "kb:alt+pageUp", "kb:alt+pageDown", "kb:alt+shift+pageUp", "kb:alt+shift+pageDown", "kb:control+shift+8", "kb:control+pageUp", "kb:control+pageDown", "kb:control+a", "kb:control+v", "kb:shift+f11", ) class ExcelCellTextInfo(NVDAObjectTextInfo): def _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True): formatField=textInfos.FormatField() if (self.obj.excelCellObject.Application.Version > "12.0"): cellObj=self.obj.excelCellObject.DisplayFormat else: cellObj=self.obj.excelCellObject fontObj=cellObj.font if formatConfig['reportAlignment']: value=alignmentLabels.get(self.obj.excelCellObject.horizontalAlignment) if value: formatField['text-align']=value value=alignmentLabels.get(self.obj.excelCellObject.verticalAlignment) if value: formatField['vertical-align']=value if formatConfig['reportFontName']: formatField['font-name']=fontObj.name if formatConfig['reportFontSize']: formatField['font-size']=str(fontObj.size) if formatConfig['reportFontAttributes']: formatField['bold']=fontObj.bold formatField['italic']=fontObj.italic underline=fontObj.underline formatField['underline']=False if underline is None or underline==xlUnderlineStyleNone else True if formatConfig['reportStyle']: try: styleName=self.obj.excelCellObject.style.nameLocal except COMError: styleName=None if styleName: formatField['style']=styleName if formatConfig['reportColor']: try: formatField['color']=colors.RGB.fromCOLORREF(int(fontObj.color)) except COMError: pass try: pattern = cellObj.Interior.Pattern formatField['background-pattern'] = backgroundPatternLabels.get(pattern) if pattern in (xlPatternLinearGradient, xlPatternRectangularGradient): formatField['background-color']=(colors.RGB.fromCOLORREF(int(cellObj.Interior.Gradient.ColorStops(1).Color))) formatField['background-color2']=(colors.RGB.fromCOLORREF(int(cellObj.Interior.Gradient.ColorStops(2).Color))) else: formatField['background-color']=colors.RGB.fromCOLORREF(int(cellObj.interior.color)) except COMError: pass if formatConfig["reportBorderStyle"]: borders = None hasMergedCells = self.obj.excelCellObject.mergeCells if hasMergedCells: mergeArea = self.obj.excelCellObject.mergeArea try: borders = mergeArea.DisplayFormat.borders # for later versions of office except COMError: borders = mergeArea.borders # for office 2007 else: borders = cellObj.borders try: formatField['border-style']=getCellBorderStyleDescription(borders,reportBorderColor=formatConfig['reportBorderColor']) except COMError: pass return formatField,(self._startOffset,self._endOffset) def _get_locationText(self): return self.obj.getCellPosition() class ExcelCell(ExcelBase): def doAction(self): pass def _get_columnHeaderText(self): return self.parent.fetchAssociatedHeaderCellText(self,columnHeader=True) def _get_rowHeaderText(self): return self.parent.fetchAssociatedHeaderCellText(self,columnHeader=False) def 
script_openDropdown(self,gesture): gesture.send() d=None curTime=startTime=time.time() while (curTime-startTime)<=0.25: if scriptHandler.isScriptWaiting(): # Prevent lag if keys are pressed rapidly return if eventHandler.isPendingEvents('gainFocus'): return d=self._getDropdown() if d: break api.processPendingEvents(processEventQueue=False) time.sleep(0.025) curTime=time.time() if not d: log.debugWarning("Failed to get dropDown, giving up") return d.parent=self eventHandler.queueEvent("gainFocus",d) def script_setColumnHeader(self,gesture): scriptCount=scriptHandler.getLastScriptRepeatCount() if not config.conf['documentFormatting']['reportTableHeaders']: # Translators: a message reported in the SetColumnHeader script for Excel. ui.message(_("Cannot set headers. Please enable reporting of table headers in Document Formatting Settings")) return if scriptCount==0: if self.parent.setAsHeaderCell(self,isColumnHeader=True,isRowHeader=False): # Translators: a message reported in the SetColumnHeader script for Excel. ui.message(_("Set {address} as start of column headers").format(address=self.cellCoordsText)) else: # Translators: a message reported in the SetColumnHeader script for Excel. ui.message(_("Already set {address} as start of column headers").format(address=self.cellCoordsText)) elif scriptCount==1: if self.parent.forgetHeaderCell(self,isColumnHeader=True,isRowHeader=False): # Translators: a message reported in the SetColumnHeader script for Excel. ui.message(_("Removed {address} from column headers").format(address=self.cellCoordsText)) else: # Translators: a message reported in the SetColumnHeader script for Excel. ui.message(_("Cannot find {address} in column headers").format(address=self.cellCoordsText)) script_setColumnHeader.__doc__=_("Pressing once will set this cell as the first column header for any cells lower and to the right of it within this region. Pressing twice will forget the current column header for this cell.") def script_setRowHeader(self,gesture): scriptCount=scriptHandler.getLastScriptRepeatCount() if not config.conf['documentFormatting']['reportTableHeaders']: # Translators: a message reported in the SetRowHeader script for Excel. ui.message(_("Cannot set headers. Please enable reporting of table headers in Document Formatting Settings")) return if scriptCount==0: if self.parent.setAsHeaderCell(self,isColumnHeader=False,isRowHeader=True): # Translators: a message reported in the SetRowHeader script for Excel. ui.message(_("Set {address} as start of row headers").format(address=self.cellCoordsText)) else: # Translators: a message reported in the SetRowHeader script for Excel. ui.message(_("Already set {address} as start of row headers").format(address=self.cellCoordsText)) elif scriptCount==1: if self.parent.forgetHeaderCell(self,isColumnHeader=False,isRowHeader=True): # Translators: a message reported in the SetRowHeader script for Excel. ui.message(_("Removed {address} from row headers").format(address=self.cellCoordsText)) else: # Translators: a message reported in the SetRowHeader script for Excel. ui.message(_("Cannot find {address} in row headers").format(address=self.cellCoordsText)) script_setRowHeader.__doc__=_("Pressing once will set this cell as the first row header for any cells lower and to the right of it within this region. 
Pressing twice will forget the current row header for this cell.") @classmethod def kwargsFromSuper(cls,kwargs,relation=None): windowHandle=kwargs['windowHandle'] excelWindowObject=cls.excelWindowObjectFromWindow(windowHandle) if not excelWindowObject: return False if isinstance(relation,tuple): excelCellObject=excelWindowObject.rangeFromPoint(relation[0],relation[1]) else: excelCellObject=excelWindowObject.ActiveCell if not excelCellObject: return False kwargs['excelWindowObject']=excelWindowObject kwargs['excelCellObject']=excelCellObject return True def __init__(self,windowHandle=None,excelWindowObject=None,excelCellObject=None): self.excelWindowObject=excelWindowObject self.excelCellObject=excelCellObject super(ExcelCell,self).__init__(windowHandle=windowHandle) def _get_excelRangeObject(self): return self.excelCellObject def _get_role(self): try: linkCount=self.excelCellObject.hyperlinks.count except (COMError,NameError,AttributeError): linkCount=None if linkCount: return controlTypes.ROLE_LINK return controlTypes.ROLE_TABLECELL TextInfo=ExcelCellTextInfo def _isEqual(self,other): if not super(ExcelCell,self)._isEqual(other): return False thisAddr=self.getCellAddress(self.excelCellObject,True) try: otherAddr=self.getCellAddress(other.excelCellObject,True) except COMError: #When cutting and pasting the old selection can become broken return False return thisAddr==otherAddr def _get_cellCoordsText(self): return self.getCellAddress(self.excelCellObject) def _get__rowAndColumnNumber(self): rc=self.excelCellObject.address(True,True,xlRC,False) return [int(x) if x else 1 for x in re_absRC.match(rc).groups()] def _get_rowNumber(self): return self._rowAndColumnNumber[0] rowSpan=1 def _get_columnNumber(self): return self._rowAndColumnNumber[1] colSpan=1 def getCellPosition(self): rowAndColumn = self.cellCoordsText sheet = self.excelWindowObject.ActiveSheet.name # Translators: a message reported in the get location text script for Excel. 
{0} is replaced with the name of the excel worksheet, and {1} is replaced with the row and column identifier EG "G4" return _(u"Sheet {0}, {1}").format(sheet, rowAndColumn) def _get_tableID(self): address=self.excelCellObject.address(1,1,0,1) ID="".join(address.split('!')[:-1]) ID="%s %s"%(ID,self.windowHandle) return ID def _get_name(self): return self.excelCellObject.Text def _getCurSummaryRowState(self): try: row=self.excelCellObject.rows[1] if row.summary: return controlTypes.STATE_EXPANDED if row.showDetail else controlTypes.STATE_COLLAPSED except COMError: pass def _getCurSummaryColumnState(self): try: col=self.excelCellObject.columns[1] if col.summary: return controlTypes.STATE_EXPANDED if col.showDetail else controlTypes.STATE_COLLAPSED except COMError: pass def _get_states(self): states=super(ExcelCell,self).states summaryCellState=self._getCurSummaryRowState() or self._getCurSummaryColumnState() if summaryCellState: states.add(summaryCellState) if self.excelCellObject.HasFormula: states.add(controlTypes.STATE_HASFORMULA) try: validationType=self.excelCellObject.validation.type except (COMError,NameError,AttributeError): validationType=None if validationType==3: states.add(controlTypes.STATE_HASPOPUP) try: comment=self.excelCellObject.comment except (COMError,NameError,AttributeError): comment=None if comment: states.add(controlTypes.STATE_HASCOMMENT) if self._overlapInfo is not None: if self._overlapInfo['obscuredFromRightBy'] > 0: states.add(controlTypes.STATE_CROPPED) if self._overlapInfo['obscuringRightBy'] > 0: states.add(controlTypes.STATE_OVERFLOWING) if self.excelWindowObject.ActiveSheet.ProtectContents and (not self.excelCellObject.Locked): states.add(controlTypes.STATE_UNLOCKED) return states def event_typedCharacter(self,ch): # #6570: You cannot type into protected cells. # Apart from speaking characters being miss-leading, Office 2016 protected view doubles characters as well. 
# Therefore for any character from space upwards (not control characters) on protected cells, play the default sound rather than speaking the character if ch>=" " and controlTypes.STATE_UNLOCKED not in self.states and controlTypes.STATE_PROTECTED in self.parent.states: winsound.PlaySound("Default",winsound.SND_ALIAS|winsound.SND_NOWAIT|winsound.SND_ASYNC) return super(ExcelCell,self).event_typedCharacter(ch) def getCellTextWidth(self): #handle to Device Context hDC = ctypes.windll.user32.GetDC(self.windowHandle) tempDC = ctypes.windll.gdi32.CreateCompatibleDC(hDC) ctypes.windll.user32.ReleaseDC(self.windowHandle, hDC) #Compatible Bitmap for current Device Context hBMP = ctypes.windll.gdi32.CreateCompatibleBitmap(tempDC, 1, 1) #handle to the bitmap object hOldBMP = ctypes.windll.gdi32.SelectObject(tempDC, hBMP) #Pass Device Context and LOGPIXELSX, the horizontal resolution in pixels per unit inch dpi = ctypes.windll.gdi32.GetDeviceCaps(tempDC, LOGPIXELSX) #Fetching Font Size and Weight information iFontSize = self.excelCellObject.Font.Size iFontSize = 11 if iFontSize is None else int(iFontSize) #Font Weight for Bold FOnt is 700 and for normal font it's 400 iFontWeight = 700 if self.excelCellObject.Font.Bold else 400 #Fetching Font Name and style information sFontName = self.excelCellObject.Font.Name sFontItalic = self.excelCellObject.Font.Italic sFontUnderline = True if self.excelCellObject.Font.Underline else False sFontStrikeThrough = self.excelCellObject.Font.Strikethrough #If FontSize is <0: The font mapper transforms this value into device units #and matches its absolute value against the character height of the available fonts. iFontHeight = iFontSize * -1 #If Font Width is 0, the font mapper chooses a closest match value. iFontWidth = 0 iEscapement = 0 iOrientation = 0 #Default CharSet based on System Locale is chosen iCharSet = 0 #Default font mapper behavior iOutputPrecision = 0 #Default clipping behavior iClipPrecision = 0 #Default Quality iOutputQuality = 0 #Default Pitch and default font family iPitchAndFamily = 0 #Create a font object with the correct size, weight and style hFont = ctypes.windll.gdi32.CreateFontW(iFontHeight, iFontWidth, iEscapement, iOrientation, iFontWeight, sFontItalic, sFontUnderline, sFontStrikeThrough, iCharSet, iOutputPrecision, iClipPrecision, iOutputQuality, iPitchAndFamily, sFontName) #Load the font into the device context, storing the original font object hOldFont = ctypes.windll.gdi32.SelectObject(tempDC, hFont) sText = self.excelCellObject.Text textLength = len(sText) class structText(ctypes.Structure): _fields_ = [("width", ctypes.c_int), ("height",ctypes.c_int)] StructText = structText() getTextExtentPoint = ctypes.windll.gdi32.GetTextExtentPoint32W getTextExtentPoint.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_int, ctypes.POINTER(structText)] getTextExtentPoint.restype = ctypes.c_int sText = unicode(sText) #Get the text dimensions ctypes.windll.gdi32.GetTextExtentPoint32W(tempDC, sText, textLength,ctypes.byref(StructText)) #Restore the old Font Object ctypes.windll.gdi32.SelectObject(tempDC, hOldFont) #Delete the font object we created ctypes.windll.gdi32.DeleteObject(hFont) #Restore the old Bitmap Object ctypes.windll.gdi32.SelectObject(tempDC, hOldBMP) #Delete the temporary BitMap Object ctypes.windll.gdi32.DeleteObject(hBMP) #Release & Delete the device context ctypes.windll.gdi32.DeleteDC(tempDC) #Retrieve the text width textWidth = StructText.width return textWidth def _get__overlapInfo(self): textWidth = self.getCellTextWidth() 
if self.excelCellObject.WrapText or self.excelCellObject.ShrinkToFit: return None isMerged = self.excelWindowObject.Selection.MergeCells try: adjacentCell = self.excelCellObject.Offset(0,1) except COMError: # #5041: This cell is at the right edge. # For our purposes, treat this as if there is an empty cell to the right. isAdjacentCellEmpty = True else: isAdjacentCellEmpty = not adjacentCell.Text info = {} if isMerged: columns=self.excelCellObject.mergeArea.columns columnCount=columns.count firstColumn=columns.item(1) lastColumn=columns.item(columnCount) firstColumnLeft=firstColumn.left lastColumnLeft=lastColumn.left lastColumnWidth=lastColumn.width cellLeft=firstColumnLeft cellRight=lastColumnLeft+lastColumnWidth else: cellLeft=self.excelCellObject.left cellRight=cellLeft+self.excelCellObject.width pointsToPixels=self.excelCellObject.Application.ActiveWindow.PointsToScreenPixelsX cellLeft=pointsToPixels(cellLeft) cellRight=pointsToPixels(cellRight) cellWidth=(cellRight-cellLeft) if textWidth <= cellWidth: info = None else: if isAdjacentCellEmpty: info['obscuringRightBy']= textWidth - cellWidth info['obscuredFromRightBy'] = 0 else: info['obscuredFromRightBy']= textWidth - cellWidth info['obscuringRightBy'] = 0 self._overlapInfo = info return self._overlapInfo def _get_parent(self): worksheet=self.excelCellObject.Worksheet self.parent=ExcelWorksheet(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelWorksheetObject=worksheet) return self.parent def _get_next(self): try: next=self.excelCellObject.next except COMError: next=None if next: return ExcelCell(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelCellObject=next) def _get_previous(self): try: previous=self.excelCellObject.previous except COMError: previous=None if previous: return ExcelCell(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelCellObject=previous) def _get_description(self): try: inputMessageTitle=self.excelCellObject.validation.inputTitle except (COMError,NameError,AttributeError): inputMessageTitle=None try: inputMessage=self.excelCellObject.validation.inputMessage except (COMError,NameError,AttributeError): inputMessage=None if inputMessage and inputMessageTitle: return _("Input Message is {title}: {message}").format( title = inputMessageTitle , message = inputMessage) elif inputMessage: return _("Input Message is {message}").format( message = inputMessage) else: return None def _get_positionInfo(self): try: level=int(self.excelCellObject.rows[1].outlineLevel)-1 except COMError: level=None if level==0: try: level=int(self.excelCellObject.columns[1].outlineLevel)-1 except COMError: level=None if level==0: level=None return {'level':level} def script_reportComment(self,gesture): commentObj=self.excelCellObject.comment text=commentObj.text() if commentObj else None if text: ui.message(text) else: # Translators: A message in Excel when there is no comment ui.message(_("Not on a comment")) # Translators: the description for a script for Excel script_reportComment.__doc__=_("Reports the comment on the current cell") def script_editComment(self,gesture): commentObj=self.excelCellObject.comment d = wx.TextEntryDialog(gui.mainFrame, # Translators: Dialog text for _("Editing comment for cell {address}").format(address=self.cellCoordsText), # Translators: Title of a dialog edit an Excel comment _("Comment"), defaultValue=commentObj.text() if commentObj else u"", style=wx.TE_MULTILINE|wx.OK|wx.CANCEL) def callback(result): if result == wx.ID_OK: if 
commentObj: commentObj.text(d.Value) else: self.excelCellObject.addComment(d.Value) gui.runScriptModalDialog(d, callback) def reportFocus(self): # #4878: Excel specific code for speaking format changes on the focused object. info=self.makeTextInfo(textInfos.POSITION_FIRST) info.expand(textInfos.UNIT_CHARACTER) formatField=textInfos.FormatField() formatConfig=config.conf['documentFormatting'] for field in info.getTextWithFields(formatConfig): if isinstance(field,textInfos.FieldCommand) and isinstance(field.field,textInfos.FormatField): formatField.update(field.field) if not hasattr(self.parent,'_formatFieldSpeechCache'): self.parent._formatFieldSpeechCache={} text=speech.getFormatFieldSpeech(formatField,attrsCache=self.parent._formatFieldSpeechCache,formatConfig=formatConfig) if formatField else None if text: speech.speakText(text) super(ExcelCell,self).reportFocus() __gestures = { "kb:NVDA+shift+c": "setColumnHeader", "kb:NVDA+shift+r": "setRowHeader", "kb:shift+f2":"editComment", "kb:alt+downArrow":"openDropdown", "kb:NVDA+alt+c":"reportComment", } class ExcelSelection(ExcelBase): role=controlTypes.ROLE_TABLECELL def __init__(self,windowHandle=None,excelWindowObject=None,excelRangeObject=None): self.excelWindowObject=excelWindowObject self.excelRangeObject=excelRangeObject super(ExcelSelection,self).__init__(windowHandle=windowHandle) def _get_states(self): states=super(ExcelSelection,self).states states.add(controlTypes.STATE_SELECTED) return states def _get_name(self): firstCell=self.excelRangeObject.Item(1) lastCell=self.excelRangeObject.Item(self.excelRangeObject.Count) # Translators: This is presented in Excel to show the current selection, for example 'a1 c3 through a10 c10' return _("{firstAddress} {firstContent} through {lastAddress} {lastContent}").format(firstAddress=self.getCellAddress(firstCell),firstContent=firstCell.Text,lastAddress=self.getCellAddress(lastCell),lastContent=lastCell.Text) def _get_parent(self): worksheet=self.excelRangeObject.Worksheet return ExcelWorksheet(windowHandle=self.windowHandle,excelWindowObject=self.excelWindowObject,excelWorksheetObject=worksheet) def _get_rowNumber(self): return self.excelRangeObject.row def _get_rowSpan(self): return self.excelRangeObject.rows.count def _get_columnNumber(self): return self.excelRangeObject.column def _get_colSpan(self): return self.excelRangeObject.columns.count #Its useful for an excel selection to be announced with reportSelection script def makeTextInfo(self,position): if position==textInfos.POSITION_SELECTION: position=textInfos.POSITION_ALL return super(ExcelSelection,self).makeTextInfo(position) class ExcelDropdownItem(Window): firstChild=None lastChild=None children=[] role=controlTypes.ROLE_LISTITEM def __init__(self,parent=None,name=None,states=None,index=None): self.name=name self.states=states self.parent=parent self.index=index super(ExcelDropdownItem,self).__init__(windowHandle=parent.windowHandle) def _get_previous(self): newIndex=self.index-1 if newIndex>=0: return self.parent.getChildAtIndex(newIndex) def _get_next(self): newIndex=self.index+1 if newIndex<self.parent.childCount: return self.parent.getChildAtIndex(newIndex) def _get_treeInterceptor(self): return self.parent.treeInterceptor def _get_positionInfo(self): return {'indexInGroup':self.index+1,'similarItemsInGroup':self.parent.childCount,} class ExcelDropdown(Window): @classmethod def kwargsFromSuper(cls,kwargs,relation=None): return kwargs role=controlTypes.ROLE_LIST excelCell=None def _get__highlightColors(self): 
background=colors.RGB.fromCOLORREF(winUser.user32.GetSysColor(13)) foreground=colors.RGB.fromCOLORREF(winUser.user32.GetSysColor(14)) self._highlightColors=(background,foreground) return self._highlightColors def _get_children(self): children=[] index=0 states=set() for item in DisplayModelTextInfo(self,textInfos.POSITION_ALL).getTextWithFields(): if isinstance(item,textInfos.FieldCommand) and item.command=="formatChange": states=set([controlTypes.STATE_SELECTABLE]) foreground=item.field.get('color',None) background=item.field.get('background-color',None) if (background,foreground)==self._highlightColors: states.add(controlTypes.STATE_SELECTED) if isinstance(item,basestring): obj=ExcelDropdownItem(parent=self,name=item,states=states,index=index) children.append(obj) index+=1 return children def getChildAtIndex(self,index): return self.children[index] def _get_childCount(self): return len(self.children) def _get_firstChild(self): return self.children[0] def _get_selection(self): for child in self.children: if controlTypes.STATE_SELECTED in child.states: return child def script_selectionChange(self,gesture): gesture.send() newFocus=self.selection or self if eventHandler.lastQueuedFocusObject is newFocus: return eventHandler.queueEvent("gainFocus",newFocus) script_selectionChange.canPropagate=True def script_closeDropdown(self,gesture): gesture.send() eventHandler.queueEvent("gainFocus",self.parent) script_closeDropdown.canPropagate=True __gestures={ "kb:downArrow":"selectionChange", "kb:upArrow":"selectionChange", "kb:leftArrow":"selectionChange", "kb:rightArrow":"selectionChange", "kb:home":"selectionChange", "kb:end":"selectionChange", "kb:escape":"closeDropdown", "kb:enter":"closeDropdown", "kb:space":"closeDropdown", } def event_gainFocus(self): child=self.selection if not child and self.childCount>0: child=self.children[0] if child: eventHandler.queueEvent("focusEntered",self) eventHandler.queueEvent("gainFocus",child) else: super(ExcelDropdown,self).event_gainFocus() class ExcelMergedCell(ExcelCell): def _get_cellCoordsText(self): return self.getCellAddress(self.excelCellObject.mergeArea) def _get_rowSpan(self): return self.excelCellObject.mergeArea.rows.count def _get_colSpan(self): return self.excelCellObject.mergeArea.columns.count class ExcelFormControl(ExcelBase): isFocusable=True _roleMap = { xlButtonControl: controlTypes.ROLE_BUTTON, xlCheckBox: controlTypes.ROLE_CHECKBOX, xlDropDown: controlTypes.ROLE_COMBOBOX, xlEditBox: controlTypes.ROLE_EDITABLETEXT, xlGroupBox: controlTypes.ROLE_BOX, xlLabel: controlTypes.ROLE_LABEL, xlListBox: controlTypes.ROLE_LIST, xlOptionButton: controlTypes.ROLE_RADIOBUTTON, xlScrollBar: controlTypes.ROLE_SCROLLBAR, xlSpinner: controlTypes.ROLE_SPINBUTTON, } def _get_excelControlFormatObject(self): return self.excelFormControlObject.controlFormat def _get_excelOLEFormatObject(self): return self.excelFormControlObject.OLEFormat.object def __init__(self,windowHandle=None,parent=None,excelFormControlObject=None): self.parent=parent self.excelFormControlObject=excelFormControlObject super(ExcelFormControl,self).__init__(windowHandle=windowHandle) def _get_role(self): try: if self.excelFormControlObject.Type==msoFormControl: formControlType=self.excelFormControlObject.FormControlType else: formControlType=None except: return None return self._roleMap[formControlType] def _get_states(self): states=super(ExcelFormControl,self).states if self is api.getFocusObject(): states.add(controlTypes.STATE_FOCUSED) newState=None if 
self.role==controlTypes.ROLE_RADIOBUTTON: newState=controlTypes.STATE_CHECKED if self.excelOLEFormatObject.Value==checked else None elif self.role==controlTypes.ROLE_CHECKBOX: if self.excelOLEFormatObject.Value==checked: newState=controlTypes.STATE_CHECKED elif self.excelOLEFormatObject.Value==mixed: newState=controlTypes.STATE_HALFCHECKED if newState: states.add(newState) return states def _get_name(self): if self.excelFormControlObject.AlternativeText: return self.excelFormControlObject.AlternativeText+" "+self.excelFormControlObject.TopLeftCell.address(False,False,1,False) + "-" + self.excelFormControlObject.BottomRightCell.address(False,False,1,False) else: return self.excelFormControlObject.Name+" "+self.excelFormControlObject.TopLeftCell.address(False,False,1,False) + "-" + self.excelFormControlObject.BottomRightCell.address(False,False,1,False) def _get_index(self): return self.excelFormControlObject.ZOrderPosition def _get_topLeftCell(self): return self.excelFormControlObject.TopLeftCell def _get_bottomRightCell(self): return self.excelFormControlObject.BottomRightCell def _getFormControlScreenCoordinates(self): topLeftAddress=self.topLeftCell bottomRightAddress=self.bottomRightCell #top left cell's width in points topLeftCellWidth=topLeftAddress.Width #top left cell's height in points topLeftCellHeight=topLeftAddress.Height #bottom right cell's width in points bottomRightCellWidth=bottomRightAddress.Width #bottom right cell's height in points bottomRightCellHeight=bottomRightAddress.Height self.excelApplicationObject=self.parent.excelWorksheetObject.Application hDC = ctypes.windll.user32.GetDC(None) #pixels per inch along screen width px = ctypes.windll.gdi32.GetDeviceCaps(hDC, LOGPIXELSX) #pixels per inch along screen height py = ctypes.windll.gdi32.GetDeviceCaps(hDC, LOGPIXELSY) ctypes.windll.user32.ReleaseDC(None, hDC) zoom=self.excelApplicationObject.ActiveWindow.Zoom zoomRatio=zoom/100 #Conversion from inches to Points, 1 inch=72points pointsPerInch = self.excelApplicationObject.InchesToPoints(1) #number of pixels from the left edge of the spreadsheet's window to the left edge the first column in the spreadsheet. X=self.excelApplicationObject.ActiveWindow.PointsToScreenPixelsX(0) #number of pixels from the top edge of the spreadsheet's window to the top edge the first row in the spreadsheet, Y=self.excelApplicationObject.ActiveWindow.PointsToScreenPixelsY(0) if topLeftAddress==bottomRightAddress: #Range.Left: The distance, in points, from the left edge of column A to the left edge of the range. X=int(X + (topLeftAddress.Left+topLeftCellWidth/2) * zoomRatio * px / pointsPerInch) #Range.Top: The distance, in points, from the top edge of Row 1 to the top edge of the range. 
Y=int(Y + (topLeftAddress.Top+topLeftCellHeight/2) * zoomRatio * py / pointsPerInch) return (X,Y) else: screenTopLeftX=int(X + (topLeftCellWidth/2 + topLeftAddress.Left) * zoomRatio * px / pointsPerInch) screenBottomRightX=int(X + (bottomRightCellWidth/2+bottomRightAddress.Left) * zoomRatio * px / pointsPerInch) screenTopLeftY = int(Y + (topLeftCellHeight/2+ topLeftAddress.Top) * zoomRatio * py / pointsPerInch) screenBottomRightY=int(Y + (bottomRightCellHeight/2+ bottomRightAddress.Top) * zoomRatio * py / pointsPerInch) return (int(0.5*(screenTopLeftX+screenBottomRightX)), int(0.5*(screenTopLeftY+screenBottomRightY))) def script_doAction(self,gesture): self.doAction() script_doAction.canPropagate=True def doAction(self): (x,y)=self._getFormControlScreenCoordinates() winUser.setCursorPos(x,y) #perform Mouse Left-Click winUser.mouse_event(winUser.MOUSEEVENTF_LEFTDOWN,0,0,None,None) winUser.mouse_event(winUser.MOUSEEVENTF_LEFTUP,0,0,None,None) self.invalidateCache() wx.CallLater(100,eventHandler.executeEvent,"stateChange",self) __gestures= { "kb:enter":"doAction", "kb:space":"doAction", "kb(desktop):numpadEnter":"doAction", } class ExcelFormControlQuickNavItem(ExcelQuickNavItem): def __init__( self , nodeType , document , formControlObject , formControlCollection, treeInterceptorObj ): super( ExcelFormControlQuickNavItem ,self).__init__( nodeType , document , formControlObject , formControlCollection ) self.formControlObjectIndex = formControlObject.ZOrderPosition self.treeInterceptorObj=treeInterceptorObj _label=None @property def label(self): if self._label: return self._label alternativeText=self.excelItemObject.AlternativeText if alternativeText: self._label=alternativeText+" "+self.excelItemObject.Name+" " + self.excelItemObject.TopLeftCell.address(False,False,1,False) + "-" + self.excelItemObject.BottomRightCell.address(False,False,1,False) else: self._label=self.excelItemObject.Name + " " + self.excelItemObject.TopLeftCell.address(False,False,1,False) + "-" + self.excelItemObject.BottomRightCell.address(False,False,1,False) return self._label _nvdaObj=None @property def nvdaObj(self): if self._nvdaObj: return self._nvdaObj formControlType=self.excelItemObject.formControlType if formControlType ==xlListBox: self._nvdaObj=ExcelFormControlListBox(windowHandle=self.treeInterceptorObj.rootNVDAObject.windowHandle,parent=self.treeInterceptorObj.rootNVDAObject,excelFormControlObject=self.excelItemObject) elif formControlType ==xlDropDown: self._nvdaObj=ExcelFormControlDropDown(windowHandle=self.treeInterceptorObj.rootNVDAObject.windowHandle,parent=self.treeInterceptorObj.rootNVDAObject,excelFormControlObject=self.excelItemObject) elif formControlType in (xlScrollBar,xlSpinner): self._nvdaObj=ExcelFormControlScrollBar(windowHandle=self.treeInterceptorObj.rootNVDAObject.windowHandle,parent=self.treeInterceptorObj.rootNVDAObject,excelFormControlObject=self.excelItemObject) else: self._nvdaObj=ExcelFormControl(windowHandle=self.treeInterceptorObj.rootNVDAObject.windowHandle,parent=self.treeInterceptorObj.rootNVDAObject,excelFormControlObject=self.excelItemObject) self._nvdaObj.treeInterceptor=self.treeInterceptorObj return self._nvdaObj def __lt__(self,other): return self.formControlObjectIndex < other.formControlObjectIndex def moveTo(self): self.excelItemObject.TopLeftCell.Select self.excelItemObject.TopLeftCell.Activate() if self.treeInterceptorObj.passThrough: self.treeInterceptorObj.passThrough=False browseMode.reportPassThrough(self.treeInterceptorObj) 
eventHandler.queueEvent("gainFocus",self.nvdaObj) @property def isAfterSelection(self): activeCell = self.document.Application.ActiveCell if self.excelItemObject.TopLeftCell.row == activeCell.row: if self.excelItemObject.TopLeftCell.column > activeCell.column: return False elif self.excelItemObject.TopLeftCell.row > activeCell.row: return False return True class ExcelFormControlQuicknavIterator(ExcelQuicknavIterator): quickNavItemClass=ExcelFormControlQuickNavItem def __init__(self, itemType , document , direction , includeCurrent,treeInterceptorObj): super(ExcelFormControlQuicknavIterator,self).__init__(itemType , document , direction , includeCurrent) self.treeInterceptorObj=treeInterceptorObj def collectionFromWorksheet( self , worksheetObject ): try: return worksheetObject.Shapes except(COMError): return None def iterate(self, position): """ returns a generator that emits L{QuickNavItem} objects for this collection. @param position: an excelRangeObject representing either the TopLeftCell of the currently selected form control or ActiveCell in a worksheet """ # Returns the Row containing TopLeftCell of an item def topLeftCellRow(item): row=item.TopLeftCell.Row # Cache row on the COM object as we need it later item._comobj.excelRow=row return row items=self.collectionFromWorksheet(self.document) if not items: return items=sorted(items,key=topLeftCellRow) if position: rangeObj=position.excelRangeObject row = rangeObj.Row col = rangeObj.Column if self.direction=="next": for collectionItem in items: itemRow=collectionItem._comobj.excelRow if (itemRow>row or (itemRow==row and collectionItem.TopLeftCell.Column>col)) and self.filter(collectionItem): item=self.quickNavItemClass(self.itemType,self.document,collectionItem,items,self.treeInterceptorObj) yield item elif self.direction=="previous": for collectionItem in reversed(items): itemRow=collectionItem._comobj.excelRow if (itemRow<row or (itemRow==row and collectionItem.TopLeftCell.Column<col)) and self.filter(collectionItem): item=self.quickNavItemClass(self.itemType,self.document,collectionItem,items,self.treeInterceptorObj ) yield item else: for collectionItem in items: if self.filter(collectionItem): item=self.quickNavItemClass(self.itemType,self.document,collectionItem , items,self.treeInterceptorObj ) yield item def filter(self,shape): if shape.Type == msoFormControl: if shape.FormControlType == xlGroupBox or shape.Visible != msoTrue: return False else: return True else: return False class ExcelFormControlListBox(ExcelFormControl): def __init__(self,windowHandle=None,parent=None,excelFormControlObject=None): super(ExcelFormControlListBox,self).__init__(windowHandle=windowHandle, parent=parent, excelFormControlObject=excelFormControlObject) try: self.listSize=int(self.excelControlFormatObject.ListCount) except: self.listSize=0 try: self.selectedItemIndex= int(self.excelControlFormatObject.ListIndex) except: self.selectedItemIndex=0 try: self.isMultiSelectable= self.excelControlFormatObject.multiSelect!=xlNone except: self.isMultiSelectable=False def getChildAtIndex(self,index): name=str(self.excelOLEFormatObject.List(index+1)) states=set([controlTypes.STATE_SELECTABLE]) if self.excelOLEFormatObject.Selected[index+1]==True: states.add(controlTypes.STATE_SELECTED) return ExcelDropdownItem(parent=self,name=name,states=states,index=index) def _get_childCount(self): return self.listSize def _get_firstChild(self): if self.listSize>0: return self.getChildAtIndex(0) def _get_lastChild(self): if self.listSize>0: return 
self.getChildAtIndex(self.listSize-1) def script_moveUp(self, gesture): if self.selectedItemIndex > 1: self.selectedItemIndex= self.selectedItemIndex - 1 if not self.isMultiSelectable: try: self.excelOLEFormatObject.Selected[self.selectedItemIndex] = True except: pass child=self.getChildAtIndex(self.selectedItemIndex-1) if child: eventHandler.queueEvent("gainFocus",child) script_moveUp.canPropagate=True def script_moveDown(self, gesture): if self.selectedItemIndex < self.listSize: self.selectedItemIndex= self.selectedItemIndex + 1 if not self.isMultiSelectable: try: self.excelOLEFormatObject.Selected[self.selectedItemIndex] = True except: pass child=self.getChildAtIndex(self.selectedItemIndex-1) if child: eventHandler.queueEvent("gainFocus",child) script_moveDown.canPropagate=True def doAction(self): if self.isMultiSelectable: try: lb=self.excelOLEFormatObject lb.Selected[self.selectedItemIndex] =not lb.Selected[self.selectedItemIndex] except: return child=self.getChildAtIndex(self.selectedItemIndex-1) eventHandler.queueEvent("gainFocus",child) __gestures= { "kb:upArrow": "moveUp", "kb:downArrow":"moveDown", } class ExcelFormControlDropDown(ExcelFormControl): def __init__(self,windowHandle=None,parent=None,excelFormControlObject=None): super(ExcelFormControlDropDown,self).__init__(windowHandle=windowHandle, parent=parent, excelFormControlObject=excelFormControlObject) try: self.listSize=self.excelControlFormatObject.ListCount except: self.listSize=0 try: self.selectedItemIndex=self.excelControlFormatObject.ListIndex except: self.selectedItemIndex=0 def script_moveUp(self, gesture): if self.selectedItemIndex > 1: self.selectedItemIndex= self.selectedItemIndex - 1 self.excelOLEFormatObject.Selected[self.selectedItemIndex] = True eventHandler.queueEvent("valueChange",self) script_moveUp.canPropagate=True def script_moveDown(self, gesture): if self.selectedItemIndex < self.listSize: self.selectedItemIndex= self.selectedItemIndex + 1 self.excelOLEFormatObject.Selected[self.selectedItemIndex] = True eventHandler.queueEvent("valueChange",self) script_moveDown.canPropagate=True def _get_value(self): if self.selectedItemIndex < self.listSize: return str(self.excelOLEFormatObject.List(self.selectedItemIndex)) __gestures= { "kb:upArrow": "moveUp", "kb:downArrow":"moveDown", } class ExcelFormControlScrollBar(ExcelFormControl): def __init__(self,windowHandle=None,parent=None,excelFormControlObject=None): super(ExcelFormControlScrollBar,self).__init__(windowHandle=windowHandle, parent=parent, excelFormControlObject=excelFormControlObject) try: self.minValue=self.excelControlFormatObject.min except: self.minValue=0 try: self.maxValue=self.excelControlFormatObject.max except: self.maxValue=0 try: self.smallChange=self.excelControlFormatObject.smallChange except: self.smallChange=0 try: self.largeChange=self.excelControlFormatObject.largeChange except: self.largeChange=0 def _get_value(self): try: return str(self.excelControlFormatObject.value) except COMError: return 0 def moveValue(self,up=False,large=False): try: curValue=self.excelControlFormatObject.value except COMError: return if up: newValue=min(curValue+(self.largeChange if large else self.smallChange),self.maxValue) else: newValue=max(curValue-(self.largeChange if large else self.smallChange),self.minValue) self.excelControlFormatObject.value=newValue eventHandler.queueEvent("valueChange",self) def script_moveUpSmall(self,gesture): self.moveValue(True,False) def script_moveDownSmall(self,gesture): self.moveValue(False,False) def 
script_moveUpLarge(self,gesture): self.moveValue(True,True) def script_moveDownLarge(self,gesture): self.moveValue(False,True) __gestures={ "kb:upArrow":"moveUpSmall", "kb:downArrow":"moveDownSmall", "kb:pageUp":"moveUpLarge", "kb:pageDown":"moveDownLarge", }
1
19,830
I think it's worth stating this is `versionMajor`. (A sketch follows this row.)
nvaccess-nvda
py
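For context on the review above: the file in this row compares Excel's version as a raw string (`self.obj.excelCellObject.Application.Version > "12.0"` inside `ExcelCellTextInfo._getFormatFieldAndOffsets`). Below is a minimal sketch of what naming the parsed value `versionMajor` might look like; the helper name and parsing are illustrative assumptions, not NVDA's actual code:

```python
def getVersionMajor(versionString):
    # Excel reports Application.Version as a string such as "9.0" or "14.0".
    # Lexical comparison orders "9.0" after "12.0", so parse the major
    # component into an int before comparing.
    return int(versionString.split(".")[0])

# Hypothetical use at the call site in _getFormatFieldAndOffsets:
# versionMajor = getVersionMajor(self.obj.excelCellObject.Application.Version)
# cellObj = (self.obj.excelCellObject.DisplayFormat
#            if versionMajor > 12 else self.obj.excelCellObject)
```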
@@ -375,3 +375,14 @@ func (nc NodeController) readGenesisJSON(genesisFile string) (genesisLedger book
 	err = protocol.DecodeJSON(genesisText, &genesisLedger)
 	return
 }
+
+// SetConsensus applies a new consensus settings which would get deployed before
+// any of the nodes starts
+func (nc NodeController) SetConsensus(consensus config.ConsensusProtocols) error {
+	return config.SaveConfigurableConsensus(nc.algodDataDir, consensus)
+}
+
+// GetConsensus rebuild the consensus version from the data directroy
+func (nc NodeController) GetConsensus() (config.ConsensusProtocols, error) {
+	return config.PreloadConfigurableConsensusProtocols(nc.algodDataDir)
+}
1
// Copyright (C) 2019-2020 Algorand, Inc. // This file is part of go-algorand // // go-algorand is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as // published by the Free Software Foundation, either version 3 of the // License, or (at your option) any later version. // // go-algorand is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Affero General Public License for more details. // // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see <https://www.gnu.org/licenses/>. package nodecontrol import ( "fmt" "io/ioutil" "net/url" "os" "os/exec" "path/filepath" "strconv" "time" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/daemon/algod/api/client" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/tokens" ) // StdErrFilename is the name of the file in <datadir> where stderr will be captured if not redirected to host const StdErrFilename = "algod-err.log" // StdOutFilename is the name of the file in <datadir> where stdout will be captured if not redirected to host const StdOutFilename = "algod-out.log" // AlgodClient attempts to build a client.RestClient for communication with // the algod REST API, but fails if we can't find the net file func (nc NodeController) AlgodClient() (algodClient client.RestClient, err error) { algodAPIToken, err := tokens.GetAndValidateAPIToken(nc.algodDataDir, tokens.AlgodTokenFilename) if err != nil { return } // Fetch the server URL from the net file, if it exists algodURL, err := nc.ServerURL() if err != nil { return } // Build the client from the URL and API token algodClient = client.MakeRestClient(algodURL, algodAPIToken) return } // ServerURL returns the appropriate URL for the node under control func (nc NodeController) ServerURL() (url.URL, error) { addr, err := nc.GetHostAddress() if err != nil { return url.URL{}, err } return url.URL{Scheme: "http", Host: addr}, nil } // GetHostAddress retrieves the REST address for the node from its algod.net file. func (nc NodeController) GetHostAddress() (string, error) { // For now, we want the old behavior to 'just work'; // so if data directory is not specified, we assume the default address of 127.0.0.1:8080 if len(nc.algodDataDir) == 0 { return "127.0.0.1:8080", nil } return util.GetFirstLineFromFile(nc.algodNetFile) } // buildAlgodCommand func (nc NodeController) buildAlgodCommand(args AlgodStartArgs) *exec.Cmd { startArgs := make([]string, 0) startArgs = append(startArgs, "-d") startArgs = append(startArgs, nc.algodDataDir) if len(args.TelemetryOverride) > 0 { startArgs = append(startArgs, "-t") startArgs = append(startArgs, args.TelemetryOverride) } // Parse peerDial and listenIP cmdline flags peerDial := args.PeerAddress if len(peerDial) > 0 { startArgs = append(startArgs, "-p") startArgs = append(startArgs, peerDial) } listenIP := args.ListenIP if len(listenIP) > 0 { startArgs = append(startArgs, "-l") startArgs = append(startArgs, listenIP) } // Check if we should be using algoh var cmd string if args.RunUnderHost { cmd = nc.algoh } else { cmd = nc.algod } return exec.Command(cmd, startArgs...) 
} // algodRunning returns a boolean indicating if algod is running func (nc NodeController) algodRunning() (isRunning bool) { _, err := nc.GetAlgodPID() if err == nil { // no error means file already exists, and we just loaded its content. // check if we can communicate with it. algodClient, err := nc.AlgodClient() if err == nil { err = algodClient.HealthCheck() if err == nil { // yes, we can communicate with it. return true } } } return false } // StopAlgod reads the net file and kills the algod process func (nc *NodeController) StopAlgod() (alreadyStopped bool, err error) { // Find algod PID algodPID, err := nc.GetAlgodPID() if err == nil { // Kill algod by PID err = killPID(int(algodPID)) if err != nil { return } } else { err = nil alreadyStopped = true } return } // StartAlgod spins up an algod process and waits for it to begin func (nc *NodeController) StartAlgod(args AlgodStartArgs) (alreadyRunning bool, err error) { // If algod is already running, we can't start again alreadyRunning = nc.algodRunning() if alreadyRunning { return alreadyRunning, nil } algodCmd := nc.buildAlgodCommand(args) var errLogger, outLogger *LaggedStdIo if args.RedirectOutput { errLogger = NewLaggedStdIo(os.Stderr, "algod") outLogger = NewLaggedStdIo(os.Stdout, "algod") algodCmd.Stderr = errLogger algodCmd.Stdout = outLogger } else if !args.RunUnderHost { // If not redirecting output to the host, we want to capture stderr and stdout to files files := nc.setAlgodCmdLogFiles(algodCmd) // Descriptors will get dup'd after exec, so OK to close when we return for _, file := range files { defer file.Close() } } err = algodCmd.Start() if err != nil { return } if args.RedirectOutput { // update the logger output prefix with the process id. linePrefix := fmt.Sprintf("algod(%d)", algodCmd.Process.Pid) errLogger.SetLinePrefix(linePrefix) outLogger.SetLinePrefix(linePrefix) } // Wait on the algod process and check if exits algodExitChan := make(chan struct{}) startAlgodCompletedChan := make(chan struct{}) defer close(startAlgodCompletedChan) go func() { // this Wait call is important even beyond the scope of this function; it allows the system to // move the process from a "zombie" state into "done" state, and is required for the Signal(0) test. err := algodCmd.Wait() select { case <-startAlgodCompletedChan: // we've already exited this function, so we want to report to the error to the callback. if args.ExitErrorCallback != nil { args.ExitErrorCallback(nc, err) } default: } algodExitChan <- struct{}{} }() success := false for !success { select { case <-algodExitChan: return false, errAlgodExitedEarly case <-time.After(time.Millisecond * 100): // If we can't talk to the API yet, spin algodClient, err := nc.AlgodClient() if err != nil { continue } // See if the server is up err = algodClient.HealthCheck() if err == nil { success = true continue } // Perhaps we're running an old version with no HealthCheck endpoint? 
_, err = algodClient.Status() if err == nil { success = true } } } return } // GetListeningAddress retrieves the listening address from the algod-listen.net file for the node func (nc NodeController) GetListeningAddress() (string, error) { return util.GetFirstLineFromFile(nc.algodNetListenFile) } // GetAlgodPID returns the PID from the algod.pid file in the node's data directory, or an error func (nc NodeController) GetAlgodPID() (pid int64, err error) { // Pull out the PID, ignoring newlines pidStr, err := util.GetFirstLineFromFile(nc.algodPidFile) if err != nil { return -1, err } // Parse as an integer pid, err = strconv.ParseInt(pidStr, 10, 32) return } // GetDataDir provides read-only access to the controller's data directory func (nc NodeController) GetDataDir() string { return nc.algodDataDir } // GetAlgodPath provides read-only access to the controller's algod instance func (nc NodeController) GetAlgodPath() string { return nc.algod } // Clone creates a new DataDir based on the controller's DataDir; if copyLedger is true, we'll clone the ledger.sqlite file func (nc NodeController) Clone(targetDir string, copyLedger bool) (err error) { os.RemoveAll(targetDir) err = os.Mkdir(targetDir, 0700) if err != nil && !os.IsExist(err) { return } // Copy Core Files, silently failing to copy any that don't exist files := []string{config.GenesisJSONFile, config.ConfigFilename, config.PhonebookFilename} for _, file := range files { src := filepath.Join(nc.algodDataDir, file) if util.FileExists(src) { dest := filepath.Join(targetDir, file) _, err = util.CopyFile(src, dest) if err != nil { switch err.(type) { case *os.PathError: continue default: return } } } } // Copy Ledger Files if requested if copyLedger { var genesis bookkeeping.Genesis genesis, err = nc.readGenesisJSON(filepath.Join(nc.algodDataDir, config.GenesisJSONFile)) if err != nil { return } genesisFolder := filepath.Join(nc.algodDataDir, genesis.ID()) targetGenesisFolder := filepath.Join(targetDir, genesis.ID()) err = os.Mkdir(targetGenesisFolder, 0770) if err != nil { return } files := []string{"ledger.sqlite"} for _, file := range files { src := filepath.Join(genesisFolder, file) dest := filepath.Join(targetGenesisFolder, file) _, err = util.CopyFile(src, dest) if err != nil { return } } } return } // GetGenesis returns the current genesis for our instance func (nc NodeController) GetGenesis() (bookkeeping.Genesis, error) { var genesis bookkeeping.Genesis genesisFile := filepath.Join(nc.GetDataDir(), config.GenesisJSONFile) genesisText, err := ioutil.ReadFile(genesisFile) if err != nil { return genesis, err } err = protocol.DecodeJSON(genesisText, &genesis) if err != nil { return genesis, err } return genesis, nil } // GetGenesisDir returns the current genesis directory for our instance func (nc NodeController) GetGenesisDir() (string, error) { genesis, err := nc.GetGenesis() if err != nil { return "", err } genesisDir := filepath.Join(nc.GetDataDir(), genesis.ID()) return genesisDir, nil } func (nc NodeController) setAlgodCmdLogFiles(cmd *exec.Cmd) (files []*os.File) { { // Scoped to ensure err and out variables aren't mixed up errFileName := filepath.Join(nc.algodDataDir, StdErrFilename) errFile, err := os.OpenFile(errFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { cmd.Stderr = errFile files = append(files, errFile) } else { fmt.Fprintf(os.Stderr, "error creating file for capturing stderr: %v\n", err) } } { outFileName := filepath.Join(nc.algodDataDir, StdOutFilename) outFile, err := os.OpenFile(outFileName, 
os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) if err == nil { cmd.Stdout = outFile files = append(files, outFile) } else { fmt.Fprintf(os.Stderr, "error creating file for capturing stdout: %v\n", err) } } return } func (nc NodeController) readGenesisJSON(genesisFile string) (genesisLedger bookkeeping.Genesis, err error) { // Load genesis genesisText, err := ioutil.ReadFile(genesisFile) if err != nil { return } err = protocol.DecodeJSON(genesisText, &genesisLedger) return }
1
37,409
`rebuild`: say "loads and merges" instead.
algorand-go-algorand
go
@@ -20,7 +20,6 @@
  * External dependencies
  */
 import { getDefaultOptions } from 'expect-puppeteer';
-import { Page, ElementHandle } from 'puppeteer';
 
 /**
  * Jest matcher for asserting the given instance has tracking loaded or not.
1
/**
 * Custom matcher for checking the presence of Site Kit event tracking.
 *
 * Site Kit by Google, Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * External dependencies
 */
import { getDefaultOptions } from 'expect-puppeteer';
import { Page, ElementHandle } from 'puppeteer';

/**
 * Jest matcher for asserting the given instance has tracking loaded or not.
 *
 * @since n.e.x.t
 *
 * @param {(Page|ElementHandle)} instance Page or element handle instance.
 * @param {Object} [options] Matcher options.
 * @param {number} [options.timeout] Maximum time to wait for selector in milliseconds.
 * @return {Object} Object with `pass` and `message` keys.
 */
export async function toHaveTracking( instance, { timeout } = getDefaultOptions() ) {
	let pass, message;

	try {
		await expect( instance ).toMatchElement( 'script[data-googlesitekit-gtag]', { timeout } );
		pass = true;
		message = () => `Expected tracking not to be loaded`;
	} catch {
		pass = false;
		message = () => `Expected tracking to be loaded`;
	}

	return { pass, message };
}
1
30,285
Why was this removed here (also in the other file)? Shouldn't we import them so that the reference in the docs below is interpreted correctly?
google-site-kit-wp
js
@@ -32,13 +32,12 @@ from databricks.koalas.series import Series
 
 
 class PySparkTestCase(unittest.TestCase):
-
     def setUp(self):
         self._old_sys_path = list(sys.path)
         if SparkContext._active_spark_context is not None:
             SparkContext._active_spark_context.stop()
         class_name = self.__class__.__name__
-        self.sc = SparkContext('local[4]', class_name)
+        self.sc = SparkContext("local[4]", class_name)
 
     def tearDown(self):
         self.sc.stop()
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import functools import shutil import sys import tempfile import unittest from contextlib import contextmanager import pandas as pd from pyspark import SparkConf, SparkContext from pyspark.sql import SparkSession, SQLContext from databricks import koalas from databricks.koalas.frame import DataFrame from databricks.koalas.indexes import Index from databricks.koalas.series import Series class PySparkTestCase(unittest.TestCase): def setUp(self): self._old_sys_path = list(sys.path) if SparkContext._active_spark_context is not None: SparkContext._active_spark_context.stop() class_name = self.__class__.__name__ self.sc = SparkContext('local[4]', class_name) def tearDown(self): self.sc.stop() sys.path = self._old_sys_path class ReusedPySparkTestCase(unittest.TestCase): @classmethod def conf(cls): """ Override this in subclasses to supply a more specific conf """ return SparkConf() @classmethod def setUpClass(cls): if SparkContext._active_spark_context is not None: SparkContext._active_spark_context.stop() cls.sc = SparkContext('local[4]', cls.__name__, conf=cls.conf()) @classmethod def tearDownClass(cls): cls.sc.stop() class SQLTestUtils(object): """ This util assumes the instance of this to have 'spark' attribute, having a spark session. It is usually used with 'ReusedSQLTestCase' class but can be used if you feel sure the the implementation of this class has 'spark' attribute. """ @contextmanager def sql_conf(self, pairs): """ A convenient context manager to test some configuration specific logic. This sets `value` to the configuration `key` and then restores it back when it exits. """ assert isinstance(pairs, dict), "pairs should be a dictionary." assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." keys = pairs.keys() new_values = pairs.values() old_values = [self.spark.conf.get(key, None) for key in keys] for key, new_value in zip(keys, new_values): self.spark.conf.set(key, new_value) try: yield finally: for key, old_value in zip(keys, old_values): if old_value is None: self.spark.conf.unset(key) else: self.spark.conf.set(key, old_value) @contextmanager def database(self, *databases): """ A convenient context manager to test with some specific databases. This drops the given databases if it exists and sets current database to "default" when it exits. """ assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." try: yield finally: for db in databases: self.spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db) self.spark.catalog.setCurrentDatabase("default") @contextmanager def table(self, *tables): """ A convenient context manager to test with some specific tables. This drops the given tables if it exists. """ assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." 
try: yield finally: for t in tables: self.spark.sql("DROP TABLE IF EXISTS %s" % t) @contextmanager def tempView(self, *views): """ A convenient context manager to test with some specific views. This drops the given views if it exists. """ assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." try: yield finally: for v in views: self.spark.catalog.dropTempView(v) @contextmanager def function(self, *functions): """ A convenient context manager to test with some specific functions. This drops the given functions if it exists. """ assert hasattr(self, "spark"), "it should have 'spark' attribute, having a spark session." try: yield finally: for f in functions: self.spark.sql("DROP FUNCTION IF EXISTS %s" % f) class ReusedSQLTestCase(ReusedPySparkTestCase, SQLTestUtils): @classmethod def setUpClass(cls): super(ReusedSQLTestCase, cls).setUpClass() cls.spark = SparkSession(cls.sc) cls.spark.conf.set('spark.sql.execution.arrow.enabled', True) @classmethod def tearDownClass(cls): super(ReusedSQLTestCase, cls).tearDownClass() cls.spark.stop() SQLContext._instantiatedContext = None def assertPandasEqual(self, left, right): if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame): msg = ("DataFrames are not equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtypes) + "\n\nRight:\n%s\n%s" % (right, right.dtypes)) self.assertTrue(left.equals(right), msg=msg) elif isinstance(left, pd.Series) and isinstance(right, pd.Series): msg = ("Series are not equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtype) + "\n\nRight:\n%s\n%s" % (right, right.dtype)) self.assertTrue((left == right).all(), msg=msg) elif isinstance(left, pd.Index) and isinstance(right, pd.Index): msg = ("Indices are not equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtype) + "\n\nRight:\n%s\n%s" % (right, right.dtype)) self.assertTrue((left == right).all(), msg=msg) else: raise ValueError("Unexpected values: (%s, %s)" % (left, right)) def assertPandasAlmostEqual(self, left, right): """ This function checks if given Pandas objects approximately same, which means the conditions below: - Both objects are nullable - Compare floats rounding to the number of decimal places, 7 after dropping missing values (NaN, NaT, None) """ if isinstance(left, pd.DataFrame) and isinstance(right, pd.DataFrame): msg = ("DataFrames are not almost equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtypes) + "\n\nRight:\n%s\n%s" % (right, right.dtypes)) self.assertEqual(left.shape, right.shape, msg=msg) for lcol, rcol in zip(left.columns, right.columns): self.assertEqual(str(lcol), str(rcol), msg=msg) for lnull, rnull in zip(left[lcol].isnull(), right[rcol].isnull()): self.assertEqual(lnull, rnull, msg=msg) for lval, rval in zip(left[lcol].dropna(), right[rcol].dropna()): self.assertAlmostEqual(lval, rval, msg=msg) elif isinstance(left, pd.Series) and isinstance(left, pd.Series): msg = ("Series are not almost equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtype) + "\n\nRight:\n%s\n%s" % (right, right.dtype)) self.assertEqual(len(left), len(right), msg=msg) for lnull, rnull in zip(left.isnull(), right.isnull()): self.assertEqual(lnull, rnull, msg=msg) for lval, rval in zip(left.dropna(), right.dropna()): self.assertAlmostEqual(lval, rval, msg=msg) elif isinstance(left, pd.Index) and isinstance(left, pd.Index): msg = ("Indices are not almost equal: " + "\n\nLeft:\n%s\n%s" % (left, left.dtype) + "\n\nRight:\n%s\n%s" % (right, right.dtype)) self.assertEqual(len(left), len(right), msg=msg) for lnull, rnull in zip(left.isnull(), right.isnull()): 
self.assertEqual(lnull, rnull, msg=msg) for lval, rval in zip(left.dropna(), right.dropna()): self.assertAlmostEqual(lval, rval, msg=msg) else: raise ValueError("Unexpected values: (%s, %s)" % (left, right)) def assert_eq(self, left, right, almost=False): """ Asserts if two arbitrary objects are equal or not. If given objects are Koalas DataFrame or Series, they are converted into Pandas' and compared. :param left: object to compare :param right: object to compare :param almost: if this is enabled, the comparison is delegated to `unittest`'s `assertAlmostEqual`. See its documentation for more details. """ lpdf = self._to_pandas(left) rpdf = self._to_pandas(right) if isinstance(lpdf, (pd.DataFrame, pd.Series, pd.Index)): if almost: self.assertPandasAlmostEqual(lpdf, rpdf) else: self.assertPandasEqual(lpdf, rpdf) else: if almost: self.assertAlmostEqual(lpdf, rpdf) else: self.assertEqual(lpdf, rpdf) def assert_array_eq(self, left, right): self.assertTrue((left == right).all()) @staticmethod def _to_pandas(df): if isinstance(df, (DataFrame, Series, Index)): return df.toPandas() else: return df class TestUtils(object): @contextmanager def temp_dir(self): tmp = tempfile.mkdtemp() try: yield tmp finally: shutil.rmtree(tmp) @contextmanager def temp_file(self): with self.temp_dir() as tmp: yield tempfile.mktemp(dir=tmp) class ComparisonTestBase(ReusedSQLTestCase): @property def kdf(self): return koalas.from_pandas(self.pdf) @property def pdf(self): return self.kdf.toPandas() def compare_both(f=None, almost=True): if f is None: return functools.partial(compare_both, almost=almost) elif isinstance(f, bool): return functools.partial(compare_both, almost=f) @functools.wraps(f) def wrapped(self): if almost: compare = self.assertPandasAlmostEqual else: compare = self.assertPandasEqual for result_pandas, result_spark in zip(f(self, self.pdf), f(self, self.kdf)): compare(result_pandas, result_spark.toPandas()) return wrapped
idx: 1
id: 11,433
msg: I'd prefer to have a blank line between the class declaration and its first member. Is it possible?
proj: databricks-koalas
lang: py
patch:
@@ -299,7 +299,7 @@ describe('Mongos SRV Polling', function () {
       return records.map(r => `${r.name}:${r.port}`);
     }
 
-    const MONGOS_DEFAULT_ISMASTER = Object.assign({}, mock.DEFAULT_ISMASTER_36, {
+    const MONGOS_LEGACY_HELLO = Object.assign({}, mock.HELLO, {
       msg: 'isdbgrid'
     });
y: 1
oldf:
'use strict'; const { Topology } = require('../../../src/sdam/topology'); const { TopologyDescription } = require('../../../src/sdam/topology_description'); const { TopologyType } = require('../../../src/sdam/common'); const { SrvPoller, SrvPollingEvent } = require('../../../src/sdam/srv_polling'); const sdamEvents = require('../../../src/sdam/events'); const dns = require('dns'); const EventEmitter = require('events').EventEmitter; const chai = require('chai'); const sinon = require('sinon'); const mock = require('../../tools/mock'); const { HostAddress } = require('../../../src/utils'); const { MongoDriverError } = require('../../../src/error'); const expect = chai.expect; chai.use(require('sinon-chai')); describe('Mongos SRV Polling', function () { const context = {}; const SRV_HOST = 'darmok.tanagra.com'; function srvRecord(mockServer, port) { if (typeof mockServer === 'string') { mockServer = { host: mockServer, port }; } return { priority: 0, weight: 0, port: mockServer.port, name: mockServer.host }; } function tryDone(done, handle) { process.nextTick(() => { try { handle(); done(); } catch (e) { done(e); } }); } function stubDns(err, records) { context.sinon.stub(dns, 'resolveSrv').callsFake(function (_srvAddress, callback) { process.nextTick(() => callback(err, records)); }); } before(function () { context.sinon = sinon.createSandbox(); }); afterEach(function () { context.sinon.restore(); }); after(function () { delete context.sinon; }); describe('SrvPoller', function () { function stubPoller(poller) { context.sinon.stub(poller, 'success'); context.sinon.stub(poller, 'failure'); context.sinon.stub(poller, 'parentDomainMismatch'); } it('should always return a valid value for `intervalMS`', function () { const poller = new SrvPoller({ srvHost: SRV_HOST }); expect(poller).property('intervalMS').to.equal(60000); }); describe('success', function () { it('should emit event, disable haMode, and schedule another poll', function (done) { const records = [srvRecord('jalad.tanagra.com'), srvRecord('thebeast.tanagra.com')]; const poller = new SrvPoller({ srvHost: SRV_HOST }); context.sinon.stub(poller, 'schedule'); poller.haMode = true; expect(poller).to.have.property('haMode', true); poller.once('srvRecordDiscovery', e => { tryDone(done, () => { expect(e) .to.be.an.instanceOf(SrvPollingEvent) .and.to.have.property('srvRecords') .that.deep.equals(records); expect(poller.schedule).to.have.been.calledOnce; expect(poller).to.have.property('haMode', false); }); }); poller.success(records); }); }); describe('failure', function () { it('should enable haMode and schedule', function () { const poller = new SrvPoller({ srvHost: SRV_HOST }); context.sinon.stub(poller, 'schedule'); poller.failure('Some kind of failure'); expect(poller.schedule).to.have.been.calledOnce; expect(poller).to.have.property('haMode', true); }); }); describe('poll', function () { it('should throw if srvHost is not passed in', function () { expect(() => new SrvPoller()).to.throw(MongoDriverError); expect(() => new SrvPoller({})).to.throw(MongoDriverError); }); it('should poll dns srv records', function () { const poller = new SrvPoller({ srvHost: SRV_HOST }); context.sinon.stub(dns, 'resolveSrv'); poller._poll(); expect(dns.resolveSrv).to.have.been.calledOnce.and.to.have.been.calledWith( `_mongodb._tcp.${SRV_HOST}`, sinon.match.func ); }); it('should not succeed or fail if poller was stopped', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); stubDns(null, []); stubPoller(poller); poller._poll(); 
poller.generation += 1; tryDone(done, () => { expect(poller.success).to.not.have.been.called; expect(poller.failure).to.not.have.been.called; expect(poller.parentDomainMismatch).to.not.have.been.called; }); }); it('should fail if dns returns error', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); stubDns(new Error('Some Error')); stubPoller(poller); poller._poll(); tryDone(done, () => { expect(poller.success).to.not.have.been.called; expect(poller.failure).to.have.been.calledOnce.and.calledWith('DNS error'); expect(poller.parentDomainMismatch).to.not.have.been.called; }); }); it('should fail if dns returns no records', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); stubDns(null, []); stubPoller(poller); poller._poll(); tryDone(done, () => { expect(poller.success).to.not.have.been.called; expect(poller.failure).to.have.been.calledOnce.and.calledWith( 'No valid addresses found at host' ); expect(poller.parentDomainMismatch).to.not.have.been.called; }); }); it('should fail if dns returns no records that match parent domain', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); const records = [srvRecord('jalad.tanagra.org'), srvRecord('shaka.walls.com')]; stubDns(null, records); stubPoller(poller); poller._poll(); tryDone(done, () => { expect(poller.success).to.not.have.been.called; expect(poller.failure).to.have.been.calledOnce.and.calledWith( 'No valid addresses found at host' ); expect(poller.parentDomainMismatch) .to.have.been.calledTwice.and.calledWith(records[0]) .and.calledWith(records[1]); }); }); it('should succeed when valid records are returned by dns', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); const records = [srvRecord('jalad.tanagra.com'), srvRecord('thebeast.tanagra.com')]; stubDns(null, records); stubPoller(poller); poller._poll(); tryDone(done, () => { expect(poller.success).to.have.been.calledOnce.and.calledWithMatch(records); expect(poller.failure).to.not.have.been.called; expect(poller.parentDomainMismatch).to.not.have.been.called; }); }); it('should succeed when some valid records are returned and some do not match parent domain', function (done) { const poller = new SrvPoller({ srvHost: SRV_HOST }); const records = [srvRecord('jalad.tanagra.com'), srvRecord('thebeast.walls.com')]; stubDns(null, records); stubPoller(poller); poller._poll(); tryDone(done, () => { expect(poller.success).to.have.been.calledOnce.and.calledWithMatch([records[0]]); expect(poller.failure).to.not.have.been.called; expect(poller.parentDomainMismatch).to.have.been.calledOnce.and.calledWith(records[1]); }); }); }); }); describe('topology', function () { class FakeSrvPoller extends EventEmitter { start() {} stop() {} trigger(srvRecords) { this.emit('srvRecordDiscovery', new SrvPollingEvent(srvRecords)); } } it('should not make an srv poller if there is no srv host', function () { const srvPoller = new FakeSrvPoller({ srvHost: SRV_HOST }); const topology = new Topology(['localhost:27017', 'localhost:27018'], { srvPoller }); expect(topology).to.not.have.property('srvPoller'); }); it('should make an srvPoller if there is an srvHost', function () { const srvPoller = new FakeSrvPoller({ srvHost: SRV_HOST }); const topology = new Topology(['localhost:27017', 'localhost:27018'], { srvHost: SRV_HOST, srvPoller }); expect(topology.s).to.have.property('srvPoller').that.equals(srvPoller); }); it('should only start polling if topology description changes to sharded', function () { const srvPoller = new FakeSrvPoller({ 
srvHost: SRV_HOST }); sinon.stub(srvPoller, 'start'); const topology = new Topology(['localhost:27017', 'localhost:27018'], { srvHost: SRV_HOST, srvPoller }); const topologyDescriptions = [ new TopologyDescription(TopologyType.Unknown), new TopologyDescription(TopologyType.Unknown), new TopologyDescription(TopologyType.Sharded), new TopologyDescription(TopologyType.Sharded) ]; function emit(prev, current) { topology.emit( 'topologyDescriptionChanged', new sdamEvents.TopologyDescriptionChangedEvent(topology.s.id, prev, current) ); } expect(srvPoller.start).to.not.have.been.called; emit(topologyDescriptions[0], topologyDescriptions[1]); expect(srvPoller.start).to.not.have.been.called; emit(topologyDescriptions[1], topologyDescriptions[2]); expect(srvPoller.start).to.have.been.calledOnce; emit(topologyDescriptions[2], topologyDescriptions[3]); expect(srvPoller.start).to.have.been.calledOnce; }); describe('prose tests', function () { function srvAddresses(records) { return records.map(r => `${r.name}:${r.port}`); } const MONGOS_DEFAULT_ISMASTER = Object.assign({}, mock.DEFAULT_ISMASTER_36, { msg: 'isdbgrid' }); beforeEach(function () { return Promise.all(Array.from({ length: 4 }).map(() => mock.createServer())).then( servers => { context.servers = servers; } ); }); afterEach(function () { return mock.cleanup(); }); afterEach(function (done) { if (context.topology) { context.topology.close(done); } else { done(); } }); function runSrvPollerTest(recordSets, done) { context.servers.forEach(server => { server.setMessageHandler(request => { const doc = request.document; if (doc.ismaster || doc.hello) { request.reply(Object.assign({}, MONGOS_DEFAULT_ISMASTER)); } }); }); const srvPoller = new FakeSrvPoller({ srvHost: SRV_HOST }); const seedlist = recordSets[0].map(record => HostAddress.fromString(`${record.name}:${record.port}`) ); context.topology = new Topology(seedlist, { srvPoller, srvHost: SRV_HOST }); const topology = context.topology; topology.connect({}, err => { if (err) { return done(err); } try { expect(topology.description).to.have.property('type', TopologyType.Sharded); const servers = Array.from(topology.description.servers.keys()); expect(servers).to.deep.equal(srvAddresses(recordSets[0])); topology.once('topologyDescriptionChanged', function () { tryDone(done, function () { const servers = Array.from(topology.description.servers.keys()); expect(servers).to.deep.equal(srvAddresses(recordSets[1])); }); }); process.nextTick(() => srvPoller.trigger(recordSets[1])); } catch (e) { done(e); } }); } // The addition of a new DNS record: // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. it('should handle the addition of a new DNS record', function (done) { const recordSets = [ [srvRecord(context.servers[0]), srvRecord(context.servers[1])], [ srvRecord(context.servers[0]), srvRecord(context.servers[1]), srvRecord(context.servers[2]) ] ]; runSrvPollerTest(recordSets, done); }); // The removal of an existing DNS record: // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. it('should handle the removal of an existing DNS record', function (done) { const recordSets = [ [srvRecord(context.servers[0]), srvRecord(context.servers[1])], [srvRecord(context.servers[0])] ]; runSrvPollerTest(recordSets, done); }); // The replacement of a DNS record: // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. // replace by: // _mongodb._tcp.test1.test.build.10gen.cc. 
86400 IN SRV 27019 localhost.test.build.10gen.cc. it('should handle the replacement of a DNS record', function (done) { const recordSets = [ [srvRecord(context.servers[0]), srvRecord(context.servers[1])], [srvRecord(context.servers[0]), srvRecord(context.servers[2])] ]; runSrvPollerTest(recordSets, done); }); // The replacement of both existing DNS records with one new record: // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. it('should handle the replacement of both existing DNS records with one new record', function (done) { const recordSets = [ [srvRecord(context.servers[0]), srvRecord(context.servers[1])], [srvRecord(context.servers[2])] ]; runSrvPollerTest(recordSets, done); }); // The replacement of both existing DNS records with two new records: // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. // _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27020 localhost.test.build.10gen.cc. it('should handle the replacement of both existing DNS records with two new records', function (done) { const recordSets = [ [srvRecord(context.servers[0]), srvRecord(context.servers[1])], [srvRecord(context.servers[2]), srvRecord(context.servers[3])] ]; runSrvPollerTest(recordSets, done); }); }); }); });
idx: 1
id: 21,278
msg: Is it really LEGACY_HELLO if we are using the most up to date HELLO? Perhaps `MONGOS_HELLO` would work here?
proj: mongodb-node-mongodb-native
lang: js
patch:
@@ -1922,10 +1922,10 @@ detach_on_permanent_stack(bool internal, bool do_cleanup)
     DEBUG_DECLARE(bool ok;)
     DEBUG_DECLARE(int exit_res;)
 
     /* synch-all flags: if we fail to suspend a thread (e.g., privilege
-     * problems) ignore it. XXX Should we retry instead?
+     * problems) retry it.
      */
     /* i#297: we only synch client threads after process exit event. */
-    uint flags = THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SKIP_CLIENT_THREAD;
+    uint flags = THREAD_SYNCH_SUSPEND_FAILURE_RETRY | THREAD_SYNCH_SKIP_CLIENT_THREAD;
 
     ENTERING_DR();
y: 1
oldf:
/* ********************************************************** * Copyright (c) 2012-2017 Google, Inc. All rights reserved. * Copyright (c) 2008-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* * thread.c - thread synchronization */ #include "globals.h" #include "synch.h" #include "instrument.h" /* is_in_client_lib() */ #include "hotpatch.h" /* hotp_only_in_tramp() */ #include "fragment.h" /* get_at_syscall() */ #include "fcache.h" /* in_fcache() */ #include "translate.h" #include "native_exec.h" #include <string.h> /* for memcpy */ extern vm_area_vector_t *fcache_unit_areas; /* from fcache.c */ static bool started_detach = false; /* set before synchall */ bool doing_detach = false; /* set after synchall */ static void synch_thread_yield(void); /* Thread-local data */ typedef struct _thread_synch_data_t { /* the following three fields are used to synchronize for detach, suspend * thread, terminate thread, terminate process */ /* synch_lock and pending_synch_count act as a semaphore */ /* for check_wait_at_safe_spot() must use a spin_mutex_t */ spin_mutex_t *synch_lock; /* we allow pending_synch_count to be read without holding the synch_lock * so all updates should be ATOMIC as well as holding the lock */ int pending_synch_count; /* to guarantee that the thread really has this permission you need to hold * the synch_lock when you read this value */ thread_synch_permission_t synch_perm; /* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set * to whether synch_with_all_threads was successful in synching this thread. */ bool synch_with_success; /* Case 10101: allows threads waiting_at_safe_spot() to set their own * contexts. This use sometimes requires a full os-specific context, which * we hide behind a generic pointer and a size. 
*/ priv_mcontext_t *set_mcontext; void *set_context; size_t set_context_size; #ifdef X64 /* PR 263338: we have to pad for alignment */ byte *set_context_alloc; #endif } thread_synch_data_t; /* This lock prevents more than one thread from being in the synch_with_all_ * threads method body at the same time (which would lead to deadlock as they * tried to synchronize with each other) */ DECLARE_CXTSWPROT_VAR(mutex_t all_threads_synch_lock, INIT_LOCK_FREE(all_threads_synch_lock)); /* pass either mc or both cxt and cxt_size */ static void free_setcontext(priv_mcontext_t *mc, void *cxt, size_t cxt_size _IF_X64(byte *cxt_alloc)) { if (mc != NULL) { ASSERT(cxt == NULL); global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER)); } else if (cxt != NULL) { ASSERT(cxt_size > 0); global_heap_free(IF_X64_ELSE(cxt_alloc, cxt), cxt_size HEAPACCT(ACCT_OTHER)); } } static void synch_thread_free_setcontext(thread_synch_data_t *tsd) { free_setcontext(tsd->set_mcontext, tsd->set_context, tsd->set_context_size _IF_X64(tsd->set_context_alloc)); tsd->set_mcontext = NULL; tsd->set_context = NULL; } void synch_init(void) { } void synch_exit(void) { ASSERT(uninit_thread_count == 0); DELETE_LOCK(all_threads_synch_lock); } void synch_thread_init(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *) heap_alloc(dcontext, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER)); dcontext->synch_field = (void *) tsd; tsd->pending_synch_count = 0; tsd->synch_perm = THREAD_SYNCH_NONE; tsd->synch_with_success = false; tsd->set_mcontext = NULL; tsd->set_context = NULL; /* the synch_lock is in unprotected memory so that check_wait_at_safe_spot * can call the EXITING_DR hook before releasing it */ tsd->synch_lock = HEAP_TYPE_ALLOC(dcontext, spin_mutex_t, ACCT_OTHER, UNPROTECTED); ASSIGN_INIT_SPINMUTEX_FREE(*tsd->synch_lock, synch_lock); } void synch_thread_exit(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; /* Could be waiting at safe spot when we detach or exit */ synch_thread_free_setcontext(tsd); DELETE_SPINMUTEX(*tsd->synch_lock); /* Note that we do need to free this in non-debug builds since, despite * appearances, UNPROTECTED_LOCAL is acutally allocated on a global * heap. */ HEAP_TYPE_FREE(dcontext, tsd->synch_lock, spin_mutex_t, ACCT_OTHER, UNPROTECTED); #ifdef DEBUG /* for non-debug we do fast exit path and don't free local heap */ /* clean up tsd fields here */ heap_free(dcontext, tsd, sizeof(thread_synch_data_t) HEAPACCT(ACCT_OTHER)); #endif } /* Check for a no-xfer permission. Currently used only for case 6821, * where we need to distinguish three groups: unsafe (wait for safe * point), safe and translatable, and safe but not translatable. */ bool thread_synch_state_no_xfer(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; return (tsd->synch_perm == THREAD_SYNCH_NO_LOCKS_NO_XFER || tsd->synch_perm == THREAD_SYNCH_VALID_MCONTEXT_NO_XFER); } bool thread_synch_check_state(dcontext_t *dcontext, thread_synch_permission_t desired_perm) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; return THREAD_SYNCH_SAFE(tsd->synch_perm, desired_perm); } /* Only valid while holding all_threads_synch_lock and thread_initexit_lock. Set to * whether synch_with_all_threads was successful in synching this thread. * Cannot be called when THREAD_SYNCH_*_AND_CLEANED was requested as the * thread-local memory will be freed on success! 
*/ bool thread_synch_successful(thread_record_t *tr) { thread_synch_data_t *tsd; ASSERT(tr != NULL && tr->dcontext != NULL); ASSERT_OWN_MUTEX(true, &all_threads_synch_lock); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); tsd = (thread_synch_data_t *) tr->dcontext->synch_field; return tsd->synch_with_success; } #ifdef UNIX /* i#2659: the kernel is now doing auto-restart so we have to check for the * pc being at the syscall. */ static bool is_after_or_restarted_do_syscall(dcontext_t *dcontext, app_pc pc, bool check_vsyscall) { if (is_after_do_syscall_addr(dcontext, pc)) return true; if (check_vsyscall && pc == vsyscall_sysenter_return_pc) return true; if (!get_at_syscall(dcontext)) /* rule out having just reached the syscall */ return false; int syslen = syscall_instr_length(dr_get_isa_mode(dcontext)); if (is_after_do_syscall_addr(dcontext, pc + syslen)) return true; if (check_vsyscall && pc + syslen == vsyscall_sysenter_return_pc) return true; return false; } #endif bool is_at_do_syscall(dcontext_t *dcontext, app_pc pc, byte *esp) { app_pc buf[2]; bool res = safe_read(esp, sizeof(buf), buf); if (!res) { ASSERT(res); /* we expect the stack to always be readable */ return false; } if (does_syscall_ret_to_callsite()) { #ifdef WINDOWS if (get_syscall_method() == SYSCALL_METHOD_INT && DYNAMO_OPTION(sygate_int)) { return (pc == after_do_syscall_addr(dcontext) && buf[0] == after_do_syscall_code(dcontext)); } else { return pc == after_do_syscall_code(dcontext); } #else return is_after_or_restarted_do_syscall(dcontext, pc, false/*!vsys*/); #endif } else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER) { #ifdef WINDOWS if (pc == vsyscall_after_syscall) { if (DYNAMO_OPTION(sygate_sysenter)) return buf[1] == after_do_syscall_code(dcontext); else return buf[0] == after_do_syscall_code(dcontext); } else { /* not at a system call, could still have tos match after_do_syscall * either by chance or because we leak that value on the apps stack * (a non transparency) */ ASSERT_CURIOSITY(buf[0] != after_do_syscall_code(dcontext)); return false; } #else /* Even when the main syscall method is sysenter, we also have a * do_int_syscall and do_clone_syscall that use int, so check both. * Note that we don't modify the stack, so once we do sysenter syscalls * inlined in the cache (PR 288101) we'll need some mechanism to * distinguish those: but for now if a sysenter instruction is used it * has to be do_syscall since DR's own syscalls are ints. */ return is_after_or_restarted_do_syscall(dcontext, pc, true/*vsys*/); #endif } /* we can reach here w/ a fault prior to 1st syscall on Linux */ IF_WINDOWS(ASSERT_NOT_REACHED()); return false; } /* Helper function for at_safe_spot(). Note state for client-owned threads isn't * considered valid since it may be holding client locks and doesn't correspond to * an actual app state. Caller should handle client-owned threads appropriately. */ static bool is_native_thread_state_valid(dcontext_t *dcontext, app_pc pc, byte *esp) { /* ref case 3675, the assumption is that if we aren't executing * out of dr memory and our stack isn't in dr memory (to disambiguate * pc in kernel32, ntdll etc.) then the app has a valid native context. * However, we can't call is_dynamo_address() as it (and its children) * grab too many different locks, all of which we would have to check * here in the same manner as fcache_unit_areas.lock in at_safe_spot(). 
So * instead we just check the pc for the dr dll, interception code, and * do_syscall regions and check the stack against the thread's dr stack * and the initstack, all of which we can do without grabbing any locks. * That should be sufficient at this point, FIXME try to use something * like is_dynamo_address() to make this more maintainable */ /* For sysenter system calls we also have to check the top of the stack * for the after_do_syscall_address to catch the do_syscall @ syscall * itself case. */ ASSERT(esp != NULL); ASSERT(is_thread_currently_native(dcontext->thread_record)); #ifdef WINDOWS if (pc == (app_pc) thread_attach_takeover) { /* We are trying to take over this thread but it has not yet been * scheduled. It was native, and can't hold any DR locks. */ return true; } #endif return (!is_in_dynamo_dll(pc) && IF_WINDOWS(!is_part_of_interception(pc) &&) (!in_generated_routine(dcontext, pc) || /* we allow native thread to be at do_syscall - for int syscalls the pc * (syscall return point) will be in do_syscall (so in generated routine) * xref case 9333 */ is_at_do_syscall(dcontext, pc, esp)) && !is_on_initstack(esp) && !is_on_dstack(dcontext, esp) && IF_CLIENT_INTERFACE(!is_in_client_lib(pc) &&) /* xref PR 200067 & 222812 on client-owned native threads */ IF_CLIENT_INTERFACE(!IS_CLIENT_THREAD(dcontext) &&) #ifdef HOT_PATCHING_INTERFACE /* Shouldn't be in the middle of executing a hotp_only patch. The * check for being in hotp_dll is WHERE_HOTPATCH because the patch can * change esp. */ (dcontext->whereami != WHERE_HOTPATCH && /* dynamo dll check has been done */ !hotp_only_in_tramp(pc)) && #endif true /* no effect, simplifies ifdef handling with && above */ ); } /* Translates the context mcontext for the given thread trec. If * restore_memory is true, also restores any memory values that were * shifted (primarily due to clients). If restore_memory is true, the * caller should always relocate the translated thread, as it may not * execute properly if left at its current location (it could be in the * middle of client code in the cache). * If recreate_app_state() is called, f will be passed through to it. * * Like any instance where a thread_record_t is used by a thread other than its * owner, the caller must hold the thread_initexit_lock to ensure that it * remains valid. * Requires thread trec is at_safe_spot(). 
*/ bool translate_mcontext(thread_record_t *trec, priv_mcontext_t *mcontext, bool restore_memory, fragment_t *f) { thread_synch_data_t *tsd = (thread_synch_data_t *) trec->dcontext->synch_field; bool res; recreate_success_t success; bool native_translate = false; ASSERT(tsd->pending_synch_count >= 0); /* check if native thread */ if (is_thread_currently_native(trec)) { /* running natively, no need to translate unless at do_syscall for an * intercepted-via-trampoline syscall which we allow now for case 9333 */ #ifdef CLIENT_INTERFACE if (IS_CLIENT_THREAD(trec->dcontext)) { /* don't need to translate anything */ LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" is client " "thread, no translation needed\n", trec->id); return true; } #endif if (is_native_thread_state_valid(trec->dcontext, (app_pc)mcontext->pc, (byte *)mcontext->xsp)) { #ifdef WINDOWS if ((app_pc)mcontext->pc == (app_pc) thread_attach_takeover) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" at " "takeover point\n", trec->id); thread_attach_translate(trec->dcontext, mcontext, restore_memory); return true; } #endif if (is_at_do_syscall(trec->dcontext, (app_pc)mcontext->pc, (byte *)mcontext->xsp)) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" running " "natively, at do_syscall so translation needed\n", trec->id); native_translate = true; } else { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" running " "natively, no translation needed\n", trec->id); return true; } } else { /* now that do_syscall is a safe spot for native threads we shouldn't get * here for get context on self, FIXME - is however possible to get here * via get_context on unsuspended thread (result of which is technically * undefined according to MS), see get_context post sys comments * (should prob. synch there in which case can assert here) */ ASSERT(trec->id != get_thread_id()); ASSERT_CURIOSITY(false && "translate failure, likely get context on " "unsuspended native thread"); /* we'll just try to translate and hope for the best */ native_translate = true; } } if (!native_translate) { /* check if waiting at a good spot */ spinmutex_lock(tsd->synch_lock); res = THREAD_SYNCH_SAFE(tsd->synch_perm, THREAD_SYNCH_VALID_MCONTEXT); spinmutex_unlock(tsd->synch_lock); if (res) { LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" waiting at " "valid mcontext point, copying over\n", trec->id); DOLOG(2, LOG_SYNCH, { LOG(THREAD_GET, LOG_SYNCH, 2, "Thread State\n"); dump_mcontext(get_mcontext(trec->dcontext), THREAD_GET, DUMP_NOT_XML); }); *mcontext = *get_mcontext(trec->dcontext); return true; } } /* In case 4148 we see a thread calling NtGetContextThread on itself, which * is undefined according to MS but it does get the syscall address, so it's * fine with us. For other threads the app shouldn't be asking about them * unless they're suspended, and the same goes for us. */ ASSERT_CURIOSITY(trec->dcontext->whereami == WHERE_FCACHE || native_translate || trec->id == get_thread_id()); LOG(THREAD_GET, LOG_SYNCH, 2, "translate context, thread "TIDFMT" at pc_recreatable spot translating\n", trec->id); success = recreate_app_state(trec->dcontext, mcontext, restore_memory, f); if (success != RECREATE_SUCCESS_STATE) { /* should never happen right? * actually it does when deciding whether can deliver a signal * immediately (PR 213040). 
*/ LOG(THREAD_GET, LOG_SYNCH, 1, "translate context, thread "TIDFMT" unable to translate context at pc" " = "PFX"\n", trec->id, mcontext->pc); SYSLOG_INTERNAL_WARNING_ONCE("failed to translate"); return false; } return true; } static bool waiting_at_safe_spot(thread_record_t *trec, thread_synch_state_t desired_state) { thread_synch_data_t *tsd = (thread_synch_data_t *) trec->dcontext->synch_field; ASSERT(tsd->pending_synch_count >= 0); /* check if waiting at a good spot, note that we can't spin in * case the suspended thread is holding this lock, note only need * lock to check the synch_perm */ if (spinmutex_trylock(tsd->synch_lock)) { thread_synch_permission_t perm = tsd->synch_perm; bool res = THREAD_SYNCH_SAFE(perm, desired_state); spinmutex_unlock(tsd->synch_lock); if (res) { LOG(THREAD_GET, LOG_SYNCH, 2, "thread "TIDFMT" waiting at safe spot (synch_perm=%d)\n", trec->id, perm); return true; } } else { LOG(THREAD_GET, LOG_SYNCH, 2, "at_safe_spot unable to get locks to test if thread "TIDFMT" is waiting " "at safe spot\n", trec->id); } return false; } #ifdef CLIENT_SIDELINE static bool should_suspend_client_thread(dcontext_t *dcontext, thread_synch_state_t desired_state) { /* Marking un-suspendable does not apply to cleaning/terminating */ ASSERT(IS_CLIENT_THREAD(dcontext)); return (THREAD_SYNCH_IS_CLEANED(desired_state) || dcontext->client_data->suspendable); } #endif /* checks whether the thread trec is at a spot suitable for requested define * desired_state * Requires that trec thread is suspended */ /* Note that since trec is potentially suspended at an arbitrary point, * this function (and any function it calls) cannot call mutex_lock as * trec thread may hold a lock. It is ok for at_safe_spot to return false if * it can't obtain a lock on the first try. FIXME : in the long term we may * want to go to a locking model that stores the thread id of the owner in * which case we can check for this situation directly */ bool at_safe_spot(thread_record_t *trec, priv_mcontext_t *mc, thread_synch_state_t desired_state) { bool safe = false; if (waiting_at_safe_spot(trec, desired_state)) return true; #ifdef ARM if (TESTANY(EFLAGS_IT, mc->cpsr)) { LOG(THREAD_GET, LOG_SYNCH, 2, "thread "TIDFMT" not at safe spot (pc="PFX" in an IT block) for %d\n", trec->id, mc->pc, desired_state); return false; } #endif /* check if suspended at good spot */ /* FIXME: right now don't distinguish between suspend and term privileges * even though suspend is stronger requirement, are the checks below * sufficient */ /* FIXME : check with respect to flush, should be ok */ /* test fcache_unit_areas.lock (from fcache.c) before calling recreate_app_state * since it calls in_fcache() which uses the lock (if we are in_fcache() * assume other locks are not a problem (so is_dynamo_address is fine)) */ /* Right now the only dr code that ends up in the cache is our DLL main * (which we'll reduce/get rid of with libc independence), our takeover * from preinject return stack, and the callback.c interception code. * FIXME : test for just these and ASSERT(!is_dynamo_address) otherwise */ if (is_thread_currently_native(trec)) { /* thread is running native, verify is not in dr code */ #ifdef CLIENT_INTERFACE /* We treat client-owned threads (such as a client nudge thread) as native and * consider them safe if they are in the client_lib. Since they might own client * locks that could block application threads from progressing, we synchronize * with them last. 
FIXME - xref PR 231301 - since we can't disambiguate * client->ntdll/gencode which is safe from client->dr->ntdll/gencode which isn't * we disallow both. This could hurt synchronization efficiency if the client * owned thread spent most of its execution time calling out of its lib to ntdll * routines or generated code. */ if (IS_CLIENT_THREAD(trec->dcontext)) { safe = (trec->dcontext->client_data->client_thread_safe_for_synch || is_in_client_lib(mc->pc)) && /* Do not cleanup/terminate a thread holding a client lock (PR 558463) */ /* Actually, don't consider a thread holding a client lock to be safe * at all (PR 609569): client should use * dr_client_thread_set_suspendable(false) if its thread spends a lot * of time holding locks. */ (!should_suspend_client_thread(trec->dcontext, desired_state) || trec->dcontext->client_data->mutex_count == 0); } #endif if (is_native_thread_state_valid(trec->dcontext, mc->pc, (byte *)mc->xsp)) { safe = true; /* We should always be able to translate a valid native state, but be * sure to check before thread_attach_exit(). */ ASSERT(translate_mcontext(trec, mc, false/*just querying*/, NULL)); #ifdef WINDOWS if (mc->pc == (app_pc) thread_attach_takeover && THREAD_SYNCH_IS_CLEANED(desired_state)) { /* The takeover data will be freed at process exit, but we might * clean up a thread mid-run, so make sure we free the data. */ thread_attach_exit(trec->dcontext, mc); } #endif } #ifdef CLIENT_INTERFACE } else if (desired_state == THREAD_SYNCH_TERMINATED_AND_CLEANED && trec->dcontext->whereami == WHERE_FCACHE && trec->dcontext->client_data->at_safe_to_terminate_syscall) { /* i#1420: At safe to terminate syscall like dr_sleep in a clean call. * XXX: A thread in dr_sleep might not be safe to terminate for some * corner cases: for example, a client may hold a lock and then go sleep, * terminating it may mess the client up for not releasing the lock. * We limit this to the thread being in fcache (i.e., from a clean call) * to rule out some corner cases. */ safe = true; #endif } else if ((!WRITE_LOCK_HELD(&fcache_unit_areas->lock) && /* even though we only need the read lock, if our target holds it * and a 3rd thread requests the write lock, we'll hang if we * ask for the read lock (case 7493) */ !READ_LOCK_HELD(&fcache_unit_areas->lock)) && recreate_app_state(trec->dcontext, mc, false/*just query*/, NULL) == RECREATE_SUCCESS_STATE && /* It's ok to call is_dynamo_address even though it grabs many * locks because recreate_app_state succeeded. */ !is_dynamo_address(mc->pc)) { safe = true; } if (safe) { ASSERT(trec->dcontext->whereami == WHERE_FCACHE || is_thread_currently_native(trec)); LOG(THREAD_GET, LOG_SYNCH, 2, "thread "TIDFMT" suspended at safe spot pc="PFX"\n", trec->id, mc->pc); return true; } LOG(THREAD_GET, LOG_SYNCH, 2, "thread "TIDFMT" not at safe spot (pc="PFX") for %d\n", trec->id, mc->pc, desired_state); return false; } /* a fast way to tell a thread if it should call check_wait_at_safe_spot * if translating context would be expensive */ bool should_wait_at_safe_spot(dcontext_t *dcontext) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; return (tsd->pending_synch_count != 0); } /* use with care! 
normally check_wait_at_safe_spot() should be called instead */ void set_synch_state(dcontext_t *dcontext, thread_synch_permission_t state) { if (state >= THREAD_SYNCH_NO_LOCKS) ASSERT_OWN_NO_LOCKS(); thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; spinmutex_lock(tsd->synch_lock); tsd->synch_perm = state; spinmutex_unlock(tsd->synch_lock); } /* checks to see if any threads are waiting to synch with this one and waits * if they are * cur_state - a given permission define from above that describes the current * state of the caller * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) */ void check_wait_at_safe_spot(dcontext_t *dcontext, thread_synch_permission_t cur_state) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; app_pc pc; byte cxt[MAX(CONTEXT_HEAP_SIZE_OPAQUE, sizeof(priv_mcontext_t))]; bool set_context = false; bool set_mcontext = false; if (tsd->pending_synch_count == 0 || cur_state == THREAD_SYNCH_NONE) return; ASSERT(tsd->pending_synch_count >= 0); pc = get_mcontext(dcontext)->pc; LOG(THREAD, LOG_SYNCH, 2, "waiting for synch with state %d (pc "PFX")\n", cur_state, pc); if (cur_state == THREAD_SYNCH_VALID_MCONTEXT) { ASSERT(!is_dynamo_address(pc)); /* for detach must set this here and now */ IF_WINDOWS(IF_CLIENT_INTERFACE(set_last_error(dcontext->app_errno))); } spinmutex_lock(tsd->synch_lock); tsd->synch_perm = cur_state; /* Since can be killed, suspended, etc. must call the exit dr hook. But, to * avoid races, we must do so before giving up the synch_lock. This is why * that lock has to be in unprotected memory. FIXME - for single thread in * dr this will lead to rank order violation between dr exclusivity lock * and the synch_lock with no easy workaround (real deadlocks possible). * Luckily we'll prob. never use that option. */ if (INTERNAL_OPTION(single_thread_in_DR)) { ASSERT_NOT_IMPLEMENTED(false); } EXITING_DR(); /* Ref case 5074, for us/app to successfully SetThreadContext at * this synch point, this thread can NOT be at a system call. So, for * case 10101, we instead have threads that are waiting_at_safe_spot() * set their own contexts, allowing us to make system calls here. * We don't yet handle the detach case, so it still requires no system * calls, including the act of releasing the synch_lock * which is why that lock has to be a user mode spin yield lock. * FIXME: we could change tsd->synch_lock back to a regular lock * once we have detach handling system calls here. */ spinmutex_unlock(tsd->synch_lock); while (tsd->pending_synch_count > 0 && tsd->synch_perm != THREAD_SYNCH_NONE) { STATS_INC_DC(dcontext, synch_loops_wait_safe); #ifdef WINDOWS if (started_detach) { /* We spin for any non-detach synchs encountered during detach * since we have no flag telling us this synch is for detach. */ /* Ref case 5074, can NOT use os_thread_yield here. This must be a user * mode spin loop. */ SPINLOCK_PAUSE(); } else { #endif /* FIXME case 10100: replace this sleep/yield with a wait_for_event() */ synch_thread_yield(); #ifdef WINDOWS } #endif } /* Regain the synch_lock before ENTERING_DR to avoid races with getting * suspended/killed in the middle of ENTERING_DR (before synch_perm is * reset to NONE). */ /* Ref case 5074, for detach we still can NOT use os_thread_yield here (no system * calls) so don't allow the spinmutex_lock to yield while grabbing the lock. 
*/ spinmutex_lock_no_yield(tsd->synch_lock); ENTERING_DR(); tsd->synch_perm = THREAD_SYNCH_NONE; if (tsd->set_mcontext != NULL || tsd->set_context != NULL) { IF_WINDOWS(ASSERT(!started_detach)); /* Make a local copy */ ASSERT(sizeof(cxt) >= sizeof(priv_mcontext_t)); if (tsd->set_mcontext != NULL) { set_mcontext = true; memcpy(cxt, tsd->set_mcontext, sizeof(*tsd->set_mcontext)); } else { set_context = true; memcpy(cxt, tsd->set_context, tsd->set_context_size); } synch_thread_free_setcontext(tsd); /* sets to NULL for us */ } spinmutex_unlock(tsd->synch_lock); LOG(THREAD, LOG_SYNCH, 2, "done waiting for synch with state %d (pc "PFX")\n", cur_state, pc); if (set_mcontext || set_context) { /* FIXME: see comment in dispatch.c check_wait_at_safe_spot() call * about problems with KSTART(fcache_* differences bet the target * being at the synch point vs in the cache. */ if (set_mcontext) thread_set_self_mcontext((priv_mcontext_t *)cxt); else thread_set_self_context((void *)cxt); ASSERT_NOT_REACHED(); } } /* adjusts the pending synch count */ void adjust_wait_at_safe_spot(dcontext_t *dcontext, int amt) { thread_synch_data_t *tsd = (thread_synch_data_t *) dcontext->synch_field; ASSERT(tsd->pending_synch_count >= 0); spinmutex_lock(tsd->synch_lock); ATOMIC_ADD(int, tsd->pending_synch_count, amt); spinmutex_unlock(tsd->synch_lock); } /* Case 10101: Safely sets the context for a target thread that may be waiting at a * safe spot, in which case we do not want to directly do a setcontext as the return * from the yield or wait system call will mess up the state (case 5074). * Assumes that cxt was allocated on the global heap, and frees it, rather than * making its own copy (as an optimization). * Does not work on the executing thread. * Caller must hold thread_initexit_lock. * If used on behalf of the app, it's up to the caller to check for privileges. */ bool set_synched_thread_context(thread_record_t *trec, /* pass either mc or both cxt and cxt_size */ priv_mcontext_t *mc, void *cxt, size_t cxt_size, thread_synch_state_t desired_state _IF_X64(byte *cxt_alloc) _IF_WINDOWS(NTSTATUS *status/*OUT*/)) { bool res = true; ASSERT(trec != NULL && trec->dcontext != NULL); ASSERT(trec->dcontext != get_thread_private_dcontext()); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); #ifdef WINDOWS if (status != NULL) *status = STATUS_SUCCESS; #endif if (waiting_at_safe_spot(trec, desired_state)) { /* case 10101: to allow system calls in check_wait_at_safe_spot() for * performance reasons we have the waiting thread perform its own setcontext. */ thread_synch_data_t *tsd = (thread_synch_data_t *) trec->dcontext->synch_field; spinmutex_lock(tsd->synch_lock); if (tsd->set_mcontext != NULL || tsd->set_context != NULL) { /* Two synchs in a row while still waiting; 2nd takes precedence */ STATS_INC(wait_multiple_setcxt); synch_thread_free_setcontext(tsd); } #ifdef WINDOWS LOG(THREAD_GET, LOG_SYNCH, 2, "set_synched_thread_context %d to pc "PFX" via %s\n", trec->id, (mc != NULL) ? mc->pc : (app_pc)((CONTEXT*)cxt)->CXT_XIP, (mc != NULL) ? 
"mc" : "CONTEXT"); #else ASSERT_NOT_IMPLEMENTED(mc != NULL); /* XXX: need sigcontext or sig_full_cxt_t */ #endif if (mc != NULL) tsd->set_mcontext = mc; else { ASSERT(cxt != NULL && cxt_size > 0); tsd->set_context = cxt; tsd->set_context_size = cxt_size; } IF_X64(tsd->set_context_alloc = cxt_alloc); ASSERT(THREAD_SYNCH_SAFE(tsd->synch_perm, desired_state)); ASSERT(tsd->pending_synch_count >= 0); /* Don't need to change pending_synch_count or anything; when thread is * resumed it will properly reset everything itself */ spinmutex_unlock(tsd->synch_lock); } else { if (mc != NULL) { res = thread_set_mcontext(trec, mc); } else { #ifdef WINDOWS /* sort of ugly: but NtSetContextThread handling needs the status */ if (status != NULL) { *status = nt_set_context(trec->handle, (CONTEXT *) cxt); res = NT_SUCCESS(*status); } else res = thread_set_context(trec->handle, (CONTEXT *) cxt); #else /* currently there are no callers who don't pass mc: presumably * PR 212090 will change that */ ASSERT_NOT_IMPLEMENTED(false); #endif } free_setcontext(mc, cxt, cxt_size _IF_X64(cxt_alloc)); } return res; } /* This is used to limit the maximum number of times synch_with_thread or * synch_with_all_threads spin yield loops while waiting on an exiting thread. * We assert if we ever break out of the loop because of this limit. FIXME make * sure this limit is large enough that if it does ever trigger it's because * of some kind of deadlock situation. Breaking out of the synchronization loop * early is a correctness issue. Right now the limits are large but arbitrary. * FIXME : once we are confident about thread synch get rid of these max loop checks. * N.B.: the THREAD_SYNCH_SMALL_LOOP_MAX flag causes us to divide these by 10. */ #define SYNCH_ALL_THREADS_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_all_threads_max_loops)) #define SYNCH_MAXIMUM_LOOPS (DYNAMO_OPTION(synch_thread_max_loops)) /* Amt of time in ms to wait for threads to get to a safe spot per a loop, * see comments in synch_with_yield() on value. Our default value is 5ms which, * depending on the tick resolution could end up being as long as 10 ms. */ #define SYNCH_WITH_WAIT_MS ((int)DYNAMO_OPTION(synch_with_sleep_time)) /* for use by synch_with_* routines to wait for thread(s) */ static void synch_thread_yield() { /* xref 9400, 9488 - os_thread_yield() works ok on an UP machine, but on an MP machine * yield might not actually do anything (in which case we burn through to the max * loop counts pretty quick). We actually do want to wait a reasonable amt of time * since the target thread might be doing some long latency dr operation (like * dumping 500kb of registry into a forensics file) so we have the option to sleep * instead. 
*/ uint num_procs = get_num_processors(); ASSERT(num_procs != 0); if ((num_procs == 1 && DYNAMO_OPTION(synch_thread_sleep_UP)) || (num_procs > 1 && DYNAMO_OPTION(synch_thread_sleep_MP))) { os_thread_sleep(SYNCH_WITH_WAIT_MS); } else { os_thread_yield(); } } /* returns a thread_synch_result_t value * id - the thread you want to synch with * block - whether or not should spin until synch is successful * hold_initexit_lock - whether or not the caller holds the thread_initexit_lock * caller_state - a given permission define from above that describes the * current state of the caller (note that holding the initexit * lock is ok with respect to NO_LOCK * desired_state - a requested state define from above that describes the * desired synchronization * flags - options from THREAD_SYNCH_ bitmask values * NOTE - if you hold the initexit_lock and block with greater than NONE for * caller state, then initexit_lock may be released and re-acquired * NOTE - if any of the nt_ routines fails, it is assumed the thread no longer * exists and returns true * NOTE - if called directly (i.e. not through synch_with_all_threads) * requires THREAD_SYNCH_IS_SAFE(caller_state, desired_state) to avoid deadlock * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) * NOTE - you can't call this with a thread that you've already suspended */ thread_synch_result_t synch_with_thread(thread_id_t id, bool block, bool hold_initexit_lock, thread_synch_permission_t caller_state, thread_synch_state_t desired_state, uint flags) { thread_id_t my_id = get_thread_id(); uint loop_count = 0; int expect_exiting = 0; thread_record_t *my_tr = thread_lookup(my_id), *trec = NULL; dcontext_t *dcontext = NULL; priv_mcontext_t mc; thread_synch_result_t res = THREAD_SYNCH_RESULT_NOT_SAFE; bool first_loop = true; IF_UNIX(bool actually_suspended = true;) const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags) ? (SYNCH_MAXIMUM_LOOPS/10) : SYNCH_MAXIMUM_LOOPS; ASSERT(id != my_id); /* Must set ABORT or IGNORE. Only caller can RETRY as need a new * set of threads for that, hoping problematic one is short-lived. */ ASSERT(TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)); if (my_tr != NULL) { dcontext = my_tr->dcontext; expect_exiting = dcontext->is_exiting ? 1 : 0; ASSERT(exiting_thread_count >= expect_exiting); } else { /* calling thread should always be a known thread */ ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_SYNCH, 2, "Synching with thread "TIDFMT", giving %d, requesting %d, blocking=%d\n", id, caller_state, desired_state, block); if (!hold_initexit_lock) mutex_lock(&thread_initexit_lock); while (true) { /* get thread record */ /* FIXME : thread id recycling is possible that this could be a * different thread, perhaps we should take handle instead of id * FIXME: use the new num field of thread_record_t? 
*/ LOG(THREAD, LOG_SYNCH, 3, "Looping on synch with thread "TIDFMT"\n", id); trec = thread_lookup(id); /* We test the exiting thread count to avoid races between terminate/ * suspend thread (current thread, though we could be here for other * reasons) and an exiting thread (who might no longer be on the all * threads list) who is still using shared resources (ref case 3121) */ if ((trec == NULL && exiting_thread_count == expect_exiting) || loop_count++ > max_loops) { /* make sure we didn't exit the loop without synchronizing, FIXME : * in release builds we assume the synchronization is failing and * continue without it, but that is dangerous. * It is now up to the caller to handle this, and some use * small loop counts and abort on failure, so only a curiosity. */ ASSERT_CURIOSITY(loop_count < max_loops); LOG(THREAD, LOG_SYNCH, 3, "Exceeded loop count synching with thread "TIDFMT"\n", id); goto exit_synch_with_thread; } DOSTATS({ if (trec == NULL && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n"); STATS_INC(synch_yields_for_exiting_thread); } }); #ifdef UNIX if (trec != NULL && trec->execve) { /* i#237/PR 498284: clean up vfork "threads" that invoked execve. * There should be no race since vfork suspends the parent. */ res = THREAD_SYNCH_RESULT_SUCCESS; actually_suspended = false; break; } #endif if (trec != NULL) { if (first_loop) { adjust_wait_at_safe_spot(trec->dcontext, 1); first_loop = false; } if (!os_thread_suspend(trec)) { /* FIXME : eventually should be a real assert once we figure out * how to handle threads with low privilege handles */ /* For dr_api_exit, we may have missed a thread exit. */ ASSERT_CURIOSITY_ONCE(IF_APP_EXPORTS(dr_api_exit ||) (false && "Thead synch unable to suspend target" " thread, case 2096?")); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); IF_UNIX(actually_suspended = false); break; } if (!thread_get_mcontext(trec, &mc)) { /* FIXME : eventually should be a real assert once we figure out * how to handle threads with low privilege handles */ ASSERT_CURIOSITY_ONCE(false && "Thead synch unable to get_context target" " thread, case 2096?"); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); /* Make sure to not leave suspended if not returning success */ if (!TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)) os_thread_resume(trec); break; } if (at_safe_spot(trec, &mc, desired_state)) { /* FIXME: case 5325 for detach handling and testing */ IF_WINDOWS(ASSERT_NOT_IMPLEMENTED (!dcontext->aslr_context.sys_aslr_clobbered)); LOG(THREAD, LOG_SYNCH, 2, "Thread "TIDFMT" suspended in good spot\n", id); LOG(trec->dcontext->logfile, LOG_SYNCH, 2, "@@@@@@@@@@@@@@@@@@ SUSPENDED BY THREAD "TIDFMT" synch_with_thread " "@@@@@@@@@@@@@@@@@@\n", my_id); res = THREAD_SYNCH_RESULT_SUCCESS; break; } if (!os_thread_resume(trec)) { ASSERT_NOT_REACHED(); res = (TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) ? THREAD_SYNCH_RESULT_SUCCESS : THREAD_SYNCH_RESULT_SUSPEND_FAILURE); break; } } /* don't loop if !block, before we ever release initexit_lock in case * caller is holding it and not blocking, (i.e. 
wants to keep it) */ if (!block) break; /* see if someone is waiting for us */ if (dcontext != NULL && caller_state != THREAD_SYNCH_NONE && should_wait_at_safe_spot(dcontext)) { if (trec != NULL) adjust_wait_at_safe_spot(trec->dcontext, -1); mutex_unlock(&thread_initexit_lock); /* ref case 5552, if we've inc'ed the exiting thread count need to * adjust it back before calling check_wait_at_safe_spot since we * may end up being killed there */ if (dcontext->is_exiting) { ASSERT(exiting_thread_count >= 1); ATOMIC_DEC(int, exiting_thread_count); } check_wait_at_safe_spot(dcontext, caller_state); if (dcontext->is_exiting) { ATOMIC_INC(int, exiting_thread_count); } mutex_lock(&thread_initexit_lock); trec = thread_lookup(id); /* Like above, we test the exiting thread count to avoid races * between terminate/suspend thread (current thread, though we * could be here for other reasons) and an exiting thread (who * might no longer be on the all threads list) who is still using * shared resources (ref case 3121) */ if (trec == NULL && exiting_thread_count == expect_exiting) { if (!hold_initexit_lock) mutex_unlock(&thread_initexit_lock); return THREAD_SYNCH_RESULT_SUCCESS; } DOSTATS({ if (trec == NULL && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread\n"); STATS_INC(synch_yields_for_exiting_thread); } }); if (trec != NULL) adjust_wait_at_safe_spot(trec->dcontext, 1); } STATS_INC(synch_yields); mutex_unlock(&thread_initexit_lock); /* Note - we only need call the ENTER/EXIT_DR hooks if single thread * in dr since we are not really exiting DR here (we just need to give * up the exclusion lock for a while to let thread we are trying to * synch with make progress towards a safe synch point). */ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ synch_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ mutex_lock(&thread_initexit_lock); } /* reset this back to before */ adjust_wait_at_safe_spot(trec->dcontext, -1); /* success!, is suspended (or already exited) put in desired state */ if (res == THREAD_SYNCH_RESULT_SUCCESS) { LOG(THREAD, LOG_SYNCH, 2, "Success synching with thread "TIDFMT" performing cleanup\n", id); if (THREAD_SYNCH_IS_TERMINATED(desired_state)) { if (IF_UNIX_ELSE(!trec->execve, true)) os_thread_terminate(trec); #ifdef UNIX /* We need to ensure the target thread has received the * signal and is no longer using its sigstack or ostd struct * before we clean those up. */ /* PR 452168: if failed to send suspend signal, do not spin */ if (actually_suspended) { if (!is_thread_terminated(trec->dcontext)) { /* i#96/PR 295561: use futex(2) if available. Blocks until * the thread gets terminated. 
*/ os_wait_thread_terminated(trec->dcontext); } } else ASSERT(TEST(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags)); #endif } if (THREAD_SYNCH_IS_CLEANED(desired_state)) { dynamo_other_thread_exit(trec _IF_WINDOWS(false)); } } exit_synch_with_thread: if (!hold_initexit_lock) mutex_unlock(&thread_initexit_lock); return res; } /* desired_synch_state - a requested state define from above that describes * the synchronization required * threads, num_threads - must not be NULL, if !THREAD_SYNCH_IS_CLEANED(desired * synch_state) then will hold a list and num of threads * cur_state - a given permission from above that describes the state of the * caller * flags - options from THREAD_SYNCH_ bitmask values * NOTE - Requires that the caller doesn't hold the thread_initexit_lock, on * return caller will hold the thread_initexit_lock * NOTE - Requires the caller is !could_be_linking (i.e. not in an * enter_couldbelinking state) * NOTE - To avoid deadlock this routine should really only be called with * cur_state giving maximum permissions, (currently app_exit and detach could * conflict, except our routes to app_exit go through different synch point * (TermThread or TermProcess) first * NOTE - when !all_synched, if desired_synch_state is not cleaned or synch result is * ignored, the caller is reponsible for resuming threads that are suspended, * freeing allocation for threads array and releasing locks * Caller should call end_synch_with_all_threads when finished to accomplish that. */ bool synch_with_all_threads(thread_synch_state_t desired_synch_state, /*OUT*/ thread_record_t ***threads_out, /*OUT*/ int *num_threads_out, thread_synch_permission_t cur_state, /* FIXME: turn the ThreadSynch* enums into bitmasks and merge * into flags param */ uint flags) { /* Case 8815: we cannot use the OUT params themselves internally as they * may be volatile, so we need our own values until we're ready to return */ bool threads_are_stale = true; thread_record_t **threads = NULL; int num_threads = 0; /* we record ids from before we gave up thread_initexit_lock */ thread_id_t *thread_ids_temp = NULL; int num_threads_temp = 0, i, j, expect_exiting = 0; /* synch array contains a SYNCH_WITH_ALL_ value for each thread */ uint *synch_array = NULL, *synch_array_temp = NULL; enum { SYNCH_WITH_ALL_NEW = 0, SYNCH_WITH_ALL_NOTIFIED = 1, SYNCH_WITH_ALL_SYNCHED = 2, }; bool all_synched = false; thread_id_t my_id = get_thread_id(); uint loop_count = 0; thread_record_t *tr = thread_lookup(my_id); dcontext_t *dcontext = NULL; uint flags_one; /* flags for synch_with_thread() call */ thread_synch_result_t synch_res; const uint max_loops = TEST(THREAD_SYNCH_SMALL_LOOP_MAX, flags) ? (SYNCH_ALL_THREADS_MAXIMUM_LOOPS/10) : SYNCH_ALL_THREADS_MAXIMUM_LOOPS; #ifdef CLIENT_INTERFACE /* We treat client-owned threads as native but they don't have a clean native state * for us to suspend them in (they are always in client or dr code). We need to be * able to suspend such threads so that they're !couldbelinking and holding no dr * locks. We make the assumption that client-owned threads that are in the client * library (or are in a dr routine that has set dcontext->client_thread_safe_to_sync) * meet this requirement (see at_safe_spot()). As such, all we need to worry about * here are client locks the client-owned thread might hold that could block other * threads from reaching safe spots. If we only suspend client-owned threads once * all other threads are taken care of then this is not a problem. 
FIXME - xref * PR 231301 on issues that arise if the client thread spends most of its time * calling out of its lib to dr API, ntdll, or generated code functions. */ bool finished_non_client_threads; #endif ASSERT(!dynamo_all_threads_synched); /* flag any caller who does not give up enough permissions to avoid livelock * with other synch_with_all_threads callers */ ASSERT_CURIOSITY(cur_state >= THREAD_SYNCH_NO_LOCKS_NO_XFER); /* also flag anyone asking for full mcontext w/o possibility of no_xfer, * which can also livelock */ ASSERT_CURIOSITY(desired_synch_state < THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT /* detach currently violates this: bug 8942 */ || started_detach); /* must set exactly one of these -- FIXME: better way to check? */ ASSERT(TESTANY(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_IGNORE, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_ABORT | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags) && !TESTALL(THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)); flags_one = flags; /* we'll do the retry */ if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_RETRY, flags)) { flags_one &= ~THREAD_SYNCH_SUSPEND_FAILURE_RETRY; flags_one |= THREAD_SYNCH_SUSPEND_FAILURE_ABORT; } if (tr != NULL) { dcontext = tr->dcontext; expect_exiting = dcontext->is_exiting ? 1 : 0; ASSERT(exiting_thread_count >= expect_exiting); } else { /* calling thread should always be a known thread */ ASSERT_NOT_REACHED(); } LOG(THREAD, LOG_SYNCH, 1, "synch with all threads my id = "SZFMT " Giving %d permission and seeking %d state\n", my_id, cur_state, desired_synch_state); /* grab all_threads_synch_lock */ /* since all_threads synch doesn't give any permissions this is necessary * to prevent deadlock in the case of two threads trying to synch with all * threads at the same time */ /* FIXME: for DEADLOCK_AVOIDANCE, to preserve LIFO, should we * exit DR, trylock, then immediately enter DR? introducing any * race conditions in doing so? * Ditto on all other os_thread_yields in this file! */ while (!mutex_trylock(&all_threads_synch_lock)) { LOG(THREAD, LOG_SYNCH, 2, "Spinning on all threads synch lock\n"); STATS_INC(synch_yields); if (dcontext != NULL && cur_state != THREAD_SYNCH_NONE && should_wait_at_safe_spot(dcontext)) { /* ref case 5552, if we've inc'ed the exiting thread count need to * adjust it back before calling check_wait_at_safe_spot since we * may end up being killed there */ if (dcontext->is_exiting) { ASSERT(exiting_thread_count >= 1); ATOMIC_DEC(int, exiting_thread_count); } check_wait_at_safe_spot(dcontext, cur_state); if (dcontext->is_exiting) { ATOMIC_INC(int, exiting_thread_count); } } LOG(THREAD, LOG_SYNCH, 2, "Yielding on all threads synch lock\n"); /* Note - we only need call the ENTER/EXIT_DR hooks if single thread * in dr since we are not really exiting DR here (we just need to give * up the exclusion lock for a while to let thread we are trying to * synch with make progress towards a safe synch point). 
*/ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ os_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ } mutex_lock(&thread_initexit_lock); /* synch with all threads */ /* FIXME: this should be a do/while loop - then we wouldn't have * to initialize all the variables above */ while (threads_are_stale || !all_synched || exiting_thread_count > expect_exiting || uninit_thread_count > 0) { if (threads != NULL){ /* Case 8941: must free here rather than when yield (below) since * termination condition can change between there and here */ ASSERT(num_threads > 0); global_heap_free(threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT)); /* be paranoid */ threads = NULL; num_threads = 0; } get_list_of_threads(&threads, &num_threads); threads_are_stale = false; synch_array = (uint *)global_heap_alloc(num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); for (i = 0; i < num_threads; i++) { synch_array[i] = SYNCH_WITH_ALL_NEW; } /* Fixme : an inefficient algorithm, but is not as bad as it seems * since it is very unlikely that many threads have started or ended * and the list threads routine always puts them in the same order */ /* on first loop num_threads_temp == 0 */ for (i = 0; i < num_threads_temp; i++) { /* care only if we have already notified or synched thread */ if (synch_array_temp[i] != SYNCH_WITH_ALL_NEW) { for (j = 0; j < num_threads; j++) { /* FIXME : os recycles thread ids, should have stronger * check here, could check dcontext equivalence, (but we * recycle those to), probably should check threads_temp * handle and be sure thread is still alive since the id * won't be recycled then */ if (threads[j]->id == thread_ids_temp[i]) { synch_array[j] = synch_array_temp[i]; break; } } } } /* free old synch list, old thread id list */ if (num_threads_temp > 0) { global_heap_free(thread_ids_temp, num_threads_temp * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); global_heap_free(synch_array_temp, num_threads_temp * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); num_threads_temp = 0; } all_synched = true; LOG(THREAD, LOG_SYNCH, 3, "Looping over all threads (%d threads)\n", num_threads); #ifdef CLIENT_INTERFACE finished_non_client_threads = true; for (i = 0; i < num_threads; i++) { if (threads[i]->id != my_id && synch_array[i] != SYNCH_WITH_ALL_SYNCHED && !IS_CLIENT_THREAD(threads[i]->dcontext)) { finished_non_client_threads = false; break; } } #endif /* make a copy of the thread ids (can't just keep the thread list * since it consists of pointers to live thread_record_t structs). * we must make the copy before synching b/c cleaning up a thread * involves freeing its thread_record_t. */ thread_ids_temp = (thread_id_t *) global_heap_alloc(num_threads * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); for (i = 0; i < num_threads; i++) thread_ids_temp[i] = threads[i]->id; num_threads_temp = num_threads; synch_array_temp = synch_array; for (i = 0; i < num_threads; i++) { /* do not de-ref threads[i] after synching if it was cleaned up! 
*/ if (synch_array[i] != SYNCH_WITH_ALL_SYNCHED && threads[i]->id != my_id) { #ifdef CLIENT_INTERFACE if (!finished_non_client_threads && IS_CLIENT_THREAD(threads[i]->dcontext)) { all_synched = false; continue; /* skip this thread for now till non-client are finished */ } if (IS_CLIENT_THREAD(threads[i]->dcontext) && (TEST(flags, THREAD_SYNCH_SKIP_CLIENT_THREAD) || !should_suspend_client_thread(threads[i]->dcontext, desired_synch_state))) { /* PR 609569: do not suspend this thread. * Avoid races between resume_all_threads() and * dr_client_thread_set_suspendable() by storing the fact. * * For most of our synchall purposes we really want to prevent * threads from acting on behalf of the application, and make * sure we can relocate them if in the code cache. DR itself is * thread-safe, and while a synchall-initiator will touch * thread-private data for threads it suspends, having some * threads it does not suspend shouldn't cause any problems so * long as it doesn't touch their thread-private data. */ synch_array[i] = SYNCH_WITH_ALL_SYNCHED; threads[i]->dcontext->client_data->left_unsuspended = true; continue; } #endif /* speed things up a tad */ if (synch_array[i] != SYNCH_WITH_ALL_NOTIFIED) { ASSERT(synch_array[i] == SYNCH_WITH_ALL_NEW); adjust_wait_at_safe_spot(threads[i]->dcontext, 1); synch_array[i] = SYNCH_WITH_ALL_NOTIFIED; } LOG(THREAD, LOG_SYNCH, 2, "About to try synch with thread #%d/%d "TIDFMT"\n", i, num_threads, threads[i]->id); synch_res = synch_with_thread(threads[i]->id, false, true, THREAD_SYNCH_NONE, desired_synch_state, flags_one); if (synch_res == THREAD_SYNCH_RESULT_SUCCESS) { LOG(THREAD, LOG_SYNCH, 2, "Synch succeeded!\n"); /* successful synch */ synch_array[i] = SYNCH_WITH_ALL_SYNCHED; if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) adjust_wait_at_safe_spot(threads[i]->dcontext, -1); } else { LOG(THREAD, LOG_SYNCH, 2, "Synch failed!\n"); all_synched = false; if (synch_res == THREAD_SYNCH_RESULT_SUSPEND_FAILURE) { if (TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) goto synch_with_all_abort; } else ASSERT(synch_res == THREAD_SYNCH_RESULT_NOT_SAFE); } } else { LOG(THREAD, LOG_SYNCH, 2, "Skipping synch with thread "TIDFMT"\n", thread_ids_temp[i]); } } if (loop_count++ >= max_loops) break; /* We test the exiting thread count to avoid races between exit * process (current thread, though we could be here for detach or other * reasons) and an exiting thread (who might no longer be on the all * threads list) who is still using shared resources (ref case 3121) */ if (!all_synched || exiting_thread_count > expect_exiting || uninit_thread_count > 0) { DOSTATS({ if (all_synched && exiting_thread_count > expect_exiting) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an exiting thread %d %d %d\n", all_synched, exiting_thread_count, expect_exiting); STATS_INC(synch_yields_for_exiting_thread); } else if (all_synched && uninit_thread_count > 0) { LOG(THREAD, LOG_SYNCH, 2, "Waiting for an uninit thread %d %d\n", all_synched, uninit_thread_count); STATS_INC(synch_yields_for_uninit_thread); } }); STATS_INC(synch_yields); /* release lock in case some other thread waiting on it */ mutex_unlock(&thread_initexit_lock); LOG(THREAD, LOG_SYNCH, 2, "Not all threads synched looping again\n"); /* Note - we only need call the ENTER/EXIT_DR hooks if single * thread in dr since we are not really exiting DR here (we just * need to give up the exclusion lock for a while to let thread we * are trying to synch with make progress towards a safe synch * point). 
*/ if (INTERNAL_OPTION(single_thread_in_DR)) EXITING_DR(); /* give up DR exclusion lock */ synch_thread_yield(); if (INTERNAL_OPTION(single_thread_in_DR)) ENTERING_DR(); /* re-gain DR exclusion lock */ mutex_lock(&thread_initexit_lock); /* We unlock and lock the thread_initexit_lock, so threads might be stale. */ threads_are_stale = true; } } /* case 9392: callers passing in ABORT expect a return value of failure * to correspond w/ no suspended threads, a freed threads array, and no * locks being held, so we go through the abort path */ if (!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) goto synch_with_all_abort; synch_with_all_exit: /* make sure we didn't exit the loop without synchronizing, FIXME : in * release builds we assume the synchronization is failing and continue * without it, but that is dangerous. * It is now up to the caller to handle this, and some use * small loop counts and abort on failure, so only a curiosity. */ ASSERT_CURIOSITY(loop_count < max_loops); ASSERT(threads != NULL); /* Since the set of threads can change we don't set the success field * until we're passing back the thread list. * We would use an tsd field directly instead of synch_array except * for THREAD_SYNCH_*_CLEAN where tsd is freed. */ ASSERT(synch_array != NULL); if (!THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { /* else unsafe to access tsd */ for (i = 0; i < num_threads; i++) { if (threads[i]->id != my_id) { thread_synch_data_t *tsd; ASSERT(threads[i]->dcontext != NULL); tsd = (thread_synch_data_t *) threads[i]->dcontext->synch_field; tsd->synch_with_success = (synch_array[i] == SYNCH_WITH_ALL_SYNCHED); } } } global_heap_free(synch_array, num_threads * sizeof(uint) HEAPACCT(ACCT_THREAD_MGT)); if (num_threads_temp > 0) { global_heap_free(thread_ids_temp, num_threads_temp * sizeof(thread_id_t) HEAPACCT(ACCT_THREAD_MGT)); } /* FIXME case 9333: on all_synch failure we do not free threads array if * synch_result is ignored. Callers are responsible for resuming threads that are * suspended and freeing allocation for threads array */ if ((!all_synched && TEST(THREAD_SYNCH_SUSPEND_FAILURE_ABORT, flags)) || THREAD_SYNCH_IS_CLEANED(desired_synch_state)) { global_heap_free(threads, num_threads * sizeof(thread_record_t *) HEAPACCT(ACCT_THREAD_MGT)); threads = NULL; num_threads = 0; } LOG(THREAD, LOG_SYNCH, 1, "Finished synch with all threads: result=%d\n", all_synched); DOLOG(1, LOG_SYNCH, { if (all_synched) { LOG(THREAD, LOG_SYNCH, 1, "\treturning holding initexit_lock and all_threads_synch_lock\n"); } }); *threads_out = threads; *num_threads_out = num_threads; dynamo_all_threads_synched = all_synched; /* FIXME case 9392: where on all_synch failure we do not release the locks in the * non-abort exit path */ return all_synched; synch_with_all_abort: /* undo everything! 
*/ for (i = 0; i < num_threads; i++) { DEBUG_DECLARE(bool ok;) if (threads[i]->id != my_id) { if (synch_array[i] == SYNCH_WITH_ALL_SYNCHED) { bool resume = true; #ifdef CLIENT_SIDELINE if (IS_CLIENT_THREAD(threads[i]->dcontext) && threads[i]->dcontext->client_data->left_unsuspended) { /* PR 609569: we did not suspend this thread */ resume = false; } #endif if (resume) { DEBUG_DECLARE(ok =) os_thread_resume(threads[i]); ASSERT(ok); } /* ensure synch_with_success is set to false on exit path, * even though locks are released and not fully valid */ synch_array[i] = SYNCH_WITH_ALL_NEW; } else if (synch_array[i] == SYNCH_WITH_ALL_NOTIFIED) { adjust_wait_at_safe_spot(threads[i]->dcontext, -1); } } } mutex_unlock(&thread_initexit_lock); mutex_unlock(&all_threads_synch_lock); ASSERT(!all_synched); /* ensure our OUT values will be NULL,0 for THREAD_SYNCH_SUSPEND_FAILURE_ABORT */ goto synch_with_all_exit; } /* Assumes that the threads were suspended with synch_with_all_threads() * and thus even is_thread_currently_native() threads were suspended. * Assumes that the caller will free up threads if it is dynamically allocated. */ void resume_all_threads(thread_record_t **threads, const uint num_threads) { uint i; thread_id_t my_tid; bool res; ASSERT_OWN_MUTEX(true, &all_threads_synch_lock); ASSERT_OWN_MUTEX(true, &thread_initexit_lock); if (threads == NULL || num_threads == 0) return; my_tid = get_thread_id(); for (i = 0; i < num_threads; i++) { if (my_tid == threads[i]->id) continue; #ifdef CLIENT_SIDELINE if (IS_CLIENT_THREAD(threads[i]->dcontext) && threads[i]->dcontext->client_data->left_unsuspended) { /* PR 609569: we did not suspend this thread */ threads[i]->dcontext->client_data->left_unsuspended = false; continue; } #endif /* This routine assumes that each thread in the array was suspended, so * each one has to successfully resume. */ res = os_thread_resume(threads[i]); ASSERT(res); } } /* Should be called to clean up after synch_with_all_threads as otherwise * dynamo_all_threads_synched will be left as true. * If resume is true, resumes the threads in the threads array. * Unlocks thread_initexit_lock and all_threads_synch_lock. * If threads != NULL, frees the threads array. */ void end_synch_with_all_threads(thread_record_t **threads, uint num_threads, bool resume) { /* dynamo_all_threads_synched will be false if synch failed */ ASSERT_CURIOSITY(dynamo_all_threads_synched); ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock)); dynamo_all_threads_synched = false; if (resume) { ASSERT(threads != NULL); resume_all_threads(threads, num_threads); } /* if we knew whether THREAD_SYNCH_*_CLEANED was specified we could set * synch_with_success to false, but it's unsafe otherwise */ mutex_unlock(&thread_initexit_lock); mutex_unlock(&all_threads_synch_lock); if (threads != NULL) { global_heap_free(threads, num_threads*sizeof(thread_record_t*) HEAPACCT(ACCT_THREAD_MGT)); } } /* Resets a thread's context to start interpreting anew. * ASSUMPTION: the thread is currently suspended. 
* This was moved here from fcache_reset_all_caches_proactively simply to * get access to win32-private CONTEXT-related routines */ void translate_from_synchall_to_dispatch(thread_record_t *tr, thread_synch_state_t synch_state) { bool res; /* we do not have to align priv_mcontext_t */ priv_mcontext_t *mc = global_heap_alloc(sizeof(*mc) HEAPACCT(ACCT_OTHER)); bool free_cxt = true; dcontext_t *dcontext = tr->dcontext; app_pc pre_translation; ASSERT(OWN_MUTEX(&all_threads_synch_lock) && OWN_MUTEX(&thread_initexit_lock)); /* FIXME: would like to assert that suspendcount is > 0 but how? */ ASSERT(thread_synch_successful(tr)); res = thread_get_mcontext(tr, mc); ASSERT(res); pre_translation = (app_pc) mc->pc; LOG(GLOBAL, LOG_CACHE, 2, "\trecreating address for "PFX"\n", mc->pc); LOG(THREAD, LOG_CACHE, 2, "translate_from_synchall_to_dispatch: being translated from "PFX"\n", mc->pc); if (get_at_syscall(dcontext)) { /* Don't need to do anything as shared_syscall and do_syscall will not * change due to a reset and will have any inlined ibl updated. If we * did try to send these guys back to dispatch, have to set asynch_tag * (as well as next_tag since translation looks only at that), restore * TOS to asynch_target/esi (unless still at reset state), and have to * figure out how to avoid post-syscall processing for those who never * did pre-syscall processing (i.e., if at shared_syscall) (else will * get wrong dcontext->sysnum, etc.) * Not to mention that after resuming the kernel will finish the * syscall and clobber several registers, making it hard to set a * clean state (xref case 6113, case 5074, and notes below)! * It's just too hard to redirect while at a syscall. */ LOG(GLOBAL, LOG_CACHE, 2, "\tat syscall so not translating\n"); /* sanity check */ ASSERT(is_after_syscall_address(dcontext, pre_translation) || IF_WINDOWS_ELSE(pre_translation == vsyscall_after_syscall, is_after_or_restarted_do_syscall (dcontext, pre_translation, true/*vsys*/))); #if defined(UNIX) && defined(X86_32) if (pre_translation == vsyscall_sysenter_return_pc || pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) { /* Because we remove the vsyscall hook on a send_all_other_threads_native() * yet have no barrier to know the threads have run their own go-native * code, we want to send them away from the hook, to our gencode. */ if (pre_translation == vsyscall_sysenter_return_pc) mc->pc = after_do_shared_syscall_addr(dcontext); else if (pre_translation + SYSENTER_LENGTH == vsyscall_sysenter_return_pc) mc->pc = get_do_int_syscall_entry(dcontext); /* exit stub and subsequent fcache_return will save rest of state */ res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } #endif IF_ARM({ if (INTERNAL_OPTION(steal_reg_at_reset) != 0) { /* We don't want to translate, just update the stolen reg values */ arch_mcontext_reset_stolen_reg(dcontext, mc); res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } }); } else { res = translate_mcontext(tr, mc, true/*restore memory*/, NULL); ASSERT(res); if (!thread_synch_successful(tr) || mc->pc == 0) { /* Better to risk failure on accessing a freed cache than * to have a guaranteed crash by sending to NULL. 
* FIXME: it's possible the real translation is NULL, * but if so should be fine to leave it there since the * current eip should also be NULL. */ ASSERT_NOT_REACHED(); goto translate_from_synchall_to_dispatch_exit; } LOG(GLOBAL, LOG_CACHE, 2, "\ttranslation pc = "PFX"\n", mc->pc); ASSERT(!is_dynamo_address((app_pc)mc->pc) && !in_fcache((app_pc)mc->pc)); IF_ARM({ if (INTERNAL_OPTION(steal_reg_at_reset) != 0) { /* XXX: do we need this? Will signal.c will fix it up prior * to sigreturn from suspend handler? */ arch_mcontext_reset_stolen_reg(dcontext, mc); } }); /* We send all threads, regardless of whether was in DR or not, to * re-interp from translated cxt, to avoid having to handle stale * local state problems if we simply resumed. * We assume no KSTATS or other state issues to deal with. * FIXME: enter hook w/o an exit? */ dcontext->next_tag = (app_pc) mc->pc; /* FIXME PR 212266: for linux if we're at an inlined syscall * we may have problems: however, we might be able to rely on the kernel * not clobbering any registers besides eax (which is ok: reset stub * handles it), though presumably it's allowed to write to any * caller-saved registers. We may need to change inlined syscalls * to set at_syscall (see comments below as well). */ if (pre_translation == IF_WINDOWS_ELSE(vsyscall_after_syscall, vsyscall_sysenter_return_pc) && !waiting_at_safe_spot(dcontext->thread_record, synch_state)) { /* FIXME case 7827/PR 212266: shouldn't translate for this case, right? * should have -ignore_syscalls set at_syscall and eliminate * this whole block of code */ /* put the proper retaddr back on the stack, as we won't * be doing the ret natively to regain control, but rather * will interpret it */ /* FIXME: ensure readable and writable? */ app_pc cur_retaddr = *((app_pc *)mc->xsp); app_pc native_retaddr; ASSERT(cur_retaddr != NULL); /* must be ignore_syscalls (else, at_syscall will be set) */ IF_WINDOWS(ASSERT(DYNAMO_OPTION(ignore_syscalls))); ASSERT(get_syscall_method() == SYSCALL_METHOD_SYSENTER); /* For DYNAMO_OPTION(sygate_sysenter) we need to restore both stack * values and fix up esp, but we can't do it here since the kernel * will change esp... incompatible w/ -ignore_syscalls anyway */ IF_WINDOWS(ASSERT_NOT_IMPLEMENTED(!DYNAMO_OPTION(sygate_sysenter))); /* may still be at syscall from a prior reset -- don't want to grab * locks for in_fcache so we determine via the translation */ ASSERT_NOT_TESTED(); native_retaddr = recreate_app_pc(dcontext, cur_retaddr, NULL); if (native_retaddr != cur_retaddr) { LOG(GLOBAL, LOG_CACHE, 2, "\trestoring TOS to "PFX" from "PFX"\n", native_retaddr, cur_retaddr); *((app_pc *)mc->xsp) = native_retaddr; } else { LOG(GLOBAL, LOG_CACHE, 2, "\tnot restoring TOS since still at previous reset state "PFX"\n", cur_retaddr); } } /* Send back to dispatch. Rather than setting up last_exit in eax here, * we point to a special routine to save the correct eax -- in fact it's * simply a direct exit stub. Originally this was b/c we tried to * translate threads at system calls, and the kernel clobbers eax (and * ecx/edx for sysenter, though preserves eip setcontext change: case * 6113, case 5074) in finishing the system call, but now that we don't * translate them we've kept the stub approach. It's actually faster * for the stub itself to save eax and set the linkstub than for us to * emulate it here, anyway. * Note that a thread in check_wait_at_safe_spot() spins and will NOT be * at a syscall, avoiding problems there (case 5074). 
*/ mc->pc = (app_pc) get_reset_exit_stub(dcontext); LOG(GLOBAL, LOG_CACHE, 2, "\tsent to reset exit stub "PFX"\n", mc->pc); /* make dispatch happy */ dcontext->whereami = WHERE_FCACHE; #ifdef WINDOWS /* i#25: we could have interrupted thread in DR, where has priv fls data * in TEB, and fcache_return blindly copies into app fls: so swap to app * now, just in case. DR routine can handle swapping when already app. */ swap_peb_pointer(dcontext, false/*to app*/); #endif /* exit stub and subsequent fcache_return will save rest of state */ res = set_synched_thread_context(dcontext->thread_record, mc, NULL, 0, synch_state _IF_X64((void *)mc) _IF_WINDOWS(NULL)); ASSERT(res); /* cxt is freed by set_synched_thread_context() or target thread */ free_cxt = false; } translate_from_synchall_to_dispatch_exit: if (free_cxt) { global_heap_free(mc, sizeof(*mc) HEAPACCT(ACCT_OTHER)); } } /*************************************************************************** * Detach and similar operations */ /* Atomic variable to prevent multiple threads from trying to detach at * the same time. */ DECLARE_CXTSWPROT_VAR(static volatile int dynamo_detaching_flag, LOCK_FREE_STATE); void send_all_other_threads_native(void) { thread_record_t **threads; dcontext_t *my_dcontext = get_thread_private_dcontext(); int i, num_threads; bool waslinking; /* We're forced to use an asynch model due to not being able to call * dynamo_thread_not_under_dynamo, which has a bonus of making it easier * to handle other threads asking for synchall. * This is why we don't ask for THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT. */ const thread_synch_state_t desired_state = THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT_OR_NO_XFER; ASSERT(dynamo_initialized && !dynamo_exited && my_dcontext != NULL); LOG(my_dcontext->logfile, LOG_ALL, 1, "%s\n", __FUNCTION__); LOG(GLOBAL, LOG_ALL, 1, "%s: cur thread "TIDFMT"\n", __FUNCTION__, get_thread_id()); waslinking = is_couldbelinking(my_dcontext); if (waslinking) enter_nolinking(my_dcontext, NULL, false); #ifdef WINDOWS /* Ensure new threads will go straight to native */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native_pause = true; init_apc_go_native = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); # ifdef CLIENT_INTERFACE wait_for_outstanding_nudges(); # endif #endif /* Suspend all threads except those trying to synch with us */ if (!synch_with_all_threads(desired_state, &threads, &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER, THREAD_SYNCH_SUSPEND_FAILURE_IGNORE)) { REPORT_FATAL_ERROR_AND_EXIT(my_dcontext, FAILED_TO_SYNCHRONIZE_THREADS, 2, get_application_name(), get_application_pid()); } ASSERT(mutex_testlock(&all_threads_synch_lock) && mutex_testlock(&thread_initexit_lock)); #ifdef WINDOWS /* Let threads waiting at APC point go native */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); init_apc_go_native_pause = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); #endif #ifdef WINDOWS /* FIXME i#95: handle outstanding callbacks where we've put our retaddr on * the app stack. This should be able to share * detach_helper_handle_callbacks() code. Won't the old single-thread * dr_app_stop() have had this same problem? Since we're not tearing * everything down, can we solve it by waiting until we hit * after_shared_syscall_code_ex() in a native thread? 
*/ ASSERT_NOT_IMPLEMENTED(get_syscall_method() != SYSCALL_METHOD_SYSENTER); #endif for (i = 0; i < num_threads; i++) { if (threads[i]->dcontext == my_dcontext || is_thread_currently_native(threads[i]) || IS_CLIENT_THREAD(threads[i]->dcontext)) continue; /* Because dynamo_thread_not_under_dynamo() has to be run by the owning * thread, the simplest solution is to send everyone back to dispatch * with a flag to go native from there, rather than directly setting the * native context. */ threads[i]->dcontext->go_native = true; if (thread_synch_state_no_xfer(threads[i]->dcontext)) { /* Another thread trying to synch with us: just let it go. It will * go native once it gets back to dispatch which will be before it * goes into the cache. */ continue; } else { LOG(my_dcontext->logfile, LOG_ALL, 1, "%s: sending thread %d native\n", __FUNCTION__, threads[i]->id); LOG(threads[i]->dcontext->logfile, LOG_ALL, 1, "**** requested by thread %d to go native\n", my_dcontext->owning_thread); /* This won't change a thread at a syscall, so we rely on the thread * going to dispatch and then going native when its syscall exits. * * FIXME i#95: That means the time to go native is, unfortunately, * unbounded. This means that dr_app_cleanup() needs to synch the * threads and force-xl8 these. We should share code with detach. * Right now we rely on the app joining all its threads *before* * calling dr_app_cleanup(), or using dr_app_stop_and_cleanup(). * This also means we have a race with unhook_vsyscall in * os_process_not_under_dynamorio(), which we solve by redirecting * threads at syscalls to our gencode. */ translate_from_synchall_to_dispatch(threads[i], desired_state); } } end_synch_with_all_threads(threads, num_threads, true/*resume*/); os_process_not_under_dynamorio(my_dcontext); if (waslinking) enter_couldbelinking(my_dcontext, NULL, false); return; } void detach_on_permanent_stack(bool internal, bool do_cleanup) { dcontext_t *my_dcontext; thread_record_t **threads; thread_record_t *my_tr = NULL; int i, num_threads, my_idx = -1; thread_id_t my_id; #ifdef WINDOWS bool detach_stacked_callbacks; bool *cleanup_tpc; #endif DEBUG_DECLARE(bool ok;) DEBUG_DECLARE(int exit_res;) /* synch-all flags: if we fail to suspend a thread (e.g., privilege * problems) ignore it. XXX Should we retry instead? */ /* i#297: we only synch client threads after process exit event. */ uint flags = THREAD_SYNCH_SUSPEND_FAILURE_IGNORE | THREAD_SYNCH_SKIP_CLIENT_THREAD; ENTERING_DR(); /* dynamo_detaching_flag is not really a lock, and since no one ever waits * on it we can't deadlock on it either. */ if (!atomic_compare_exchange(&dynamo_detaching_flag, LOCK_FREE_STATE, LOCK_SET_STATE)) return; /* Unprotect .data for exit cleanup. 
* XXX: more secure to not do this until we've synched, but then need * alternative prot for started_detach and init_apc_go_native* */ SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); ASSERT(!started_detach); started_detach = true; if (!internal) { synchronize_dynamic_options(); if (!DYNAMO_OPTION(allow_detach)) { started_detach = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); dynamo_detaching_flag = LOCK_FREE_STATE; SYSLOG_INTERNAL_ERROR("Detach called without the allow_detach option set"); EXITING_DR(); return; } } ASSERT(dynamo_initialized); ASSERT(!dynamo_exited); my_id = get_thread_id(); my_dcontext = get_thread_private_dcontext(); ASSERT(my_dcontext != NULL); LOG(GLOBAL, LOG_ALL, 1, "Detach: thread %d starting detach process\n", my_id); SYSLOG(SYSLOG_INFORMATION, INFO_DETACHING, 2, get_application_name(), get_application_pid()); /* synch with flush */ if (my_dcontext != NULL) enter_threadexit(my_dcontext); #ifdef WINDOWS /* Signal to go native at APC init here. Set pause first so that threads * will wait till we are ready for them to go native (after ntdll unpatching). * (To avoid races these must be set in this order!) */ init_apc_go_native_pause = true; init_apc_go_native = true; /* XXX i#2611: there is still a race for threads caught between init_apc_go_native * and dynamo_thread_init adding to all_threads: this just reduces the risk. * Unfortunately we can't easily use the UNIX solution of uninit_thread_count * since we can't distinguish internally vs externally created threads. */ os_thread_yield(); # ifdef CLIENT_INTERFACE wait_for_outstanding_nudges(); # endif #endif /* suspend all DR-controlled threads at safe locations */ if (!synch_with_all_threads(THREAD_SYNCH_SUSPENDED_VALID_MCONTEXT, &threads, &num_threads, /* Case 6821: allow other synch-all-thread uses * that beat us to not wait on us. We still have * a problem if we go first since we must xfer * other threads. */ THREAD_SYNCH_NO_LOCKS_NO_XFER, flags)) { REPORT_FATAL_ERROR_AND_EXIT(my_dcontext, FAILED_TO_SYNCHRONIZE_THREADS, 2, get_application_name(), get_application_pid()); } /* Now we own the thread_initexit_lock. We'll release the locks grabbed in * synch_with_all_threads below after cleaning up all the threads in case we * need to grab it during process exit cleanup. */ ASSERT(mutex_testlock(&all_threads_synch_lock) && mutex_testlock(&thread_initexit_lock)); ASSERT(!doing_detach); doing_detach = true; #ifdef HOT_PATCHING_INTERFACE /* In hotp_only mode, we must remove patches when detaching; we don't want * to leave in all our hooks and detach; that will definitely crash the app. */ if (DYNAMO_OPTION(hotp_only)) hotp_only_detach_helper(); #endif #ifdef WINDOWS /* XXX: maybe we should re-check for additional threads that passed the init_apc * lock but weren't yet initialized and so didn't show up on the list? */ LOG(GLOBAL, LOG_ALL, 1, "Detach : about to unpatch ntdll.dll and fix memory permissions\n"); detach_remove_image_entry_hook(num_threads, threads); if (!INTERNAL_OPTION(noasynch)) { /* We have to do this here, before client exit events, as we're letting * threads go native next. We thus will not detect crashes during client * exit during detach. 
*/ callback_interception_unintercept(); } #endif if (!DYNAMO_OPTION(thin_client)) revert_memory_regions(); #ifdef UNIX unhook_vsyscall(); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach : unpatched ntdll.dll and fixed memory permissions\n"); #ifdef WINDOWS /* Release the APC init lock and let any threads waiting there go native */ LOG(GLOBAL, LOG_ALL, 1, "Detach : Releasing init_apc_go_native_pause\n"); init_apc_go_native_pause = false; #else /* i#2270: we ignore alarm signals during detach to reduce races. */ signal_remove_alarm_handlers(my_dcontext); #endif /* perform exit tasks that require full thread data structs */ dynamo_process_exit_with_thread_info(); #ifdef WINDOWS /* We need to record a bool indicating whether we can free each thread's * resources fully or whether we need them for callback cleanup. */ cleanup_tpc = (bool *)global_heap_alloc(num_threads*sizeof(bool) HEAPACCT(ACCT_OTHER)); /* Handle any outstanding callbacks */ detach_stacked_callbacks = detach_handle_callbacks(num_threads, threads, cleanup_tpc); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach: starting to translate contexts\n"); for (i = 0; i < num_threads; i++) { priv_mcontext_t mc; if (threads[i]->dcontext == my_dcontext) { my_idx = i; my_tr = threads[i]; continue; } else if (IS_CLIENT_THREAD(threads[i]->dcontext)) { /* i#297 we will kill client-owned threads later after app exit events * in dynamo_shared_exit(). */ continue; } else if (detach_do_not_translate(threads[i])) { LOG(GLOBAL, LOG_ALL, 2, "Detach: not translating "TIDFMT"\n", threads[i]->id); } else { LOG(GLOBAL, LOG_ALL, 2, "Detach: translating "TIDFMT"\n", threads[i]->id); DEBUG_DECLARE(ok =) thread_get_mcontext(threads[i], &mc); ASSERT(ok); /* FIXME i#95: this will xl8 to a post-syscall point for a thread at * a syscall, and we rely on the app itself to retry a syscall interrupted * by our suspend signal. This is not good enough, as this is an * artifical signal that the app has not planned for with SA_RESTART or * a loop. We want something like adjust_syscall_for_restart(). * Xref i#1145. */ DEBUG_DECLARE(ok =) translate_mcontext(threads[i], &mc, true/*restore mem*/, NULL/*f*/); ASSERT(ok); if (!threads[i]->under_dynamo_control) { LOG(GLOBAL, LOG_ALL, 1, "Detach : thread "TIDFMT" already running natively\n", threads[i]->id); /* we do need to restore the app ret addr, for native_exec */ if (!DYNAMO_OPTION(thin_client) && DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { put_back_native_retaddrs(threads[i]->dcontext); } } detach_finalize_translation(threads[i], &mc); LOG(GLOBAL, LOG_ALL, 1, "Detach: pc="PFX" for thread "TIDFMT"\n", mc.pc, threads[i]->id); ASSERT(!is_dynamo_address(mc.pc) && !in_fcache(mc.pc)); /* XXX case 7457: if the thread is suspended after it received a fault * but before the kernel copied the faulting context to the user mode * structures for the handler, it could result in a codemod exception * that wouldn't happen natively! */ DEBUG_DECLARE(ok =) thread_set_mcontext(threads[i], &mc); ASSERT(ok); /* i#249: restore app's PEB/TEB fields */ IF_WINDOWS(restore_peb_pointer_for_thread(threads[i]->dcontext)); } /* Resumes the thread, which will do kernel-visible cleanup of * signal state. Resume happens within the synch_all region where * the thread_initexit_lock is held so that we can clean up thread * data later. 
*/ #ifdef UNIX os_signal_thread_detach(threads[i]->dcontext); #endif LOG(GLOBAL, LOG_ALL, 1, "Detach: thread "TIDFMT" is being resumed as native\n", threads[i]->id); os_thread_resume(threads[i]); } ASSERT(my_idx != -1 || !internal); #ifdef UNIX LOG(GLOBAL, LOG_ALL, 1, "Detach: waiting for threads to fully detach\n"); for (i = 0; i < num_threads; i++) { if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) os_wait_thread_detached(threads[i]->dcontext); } #endif if (!do_cleanup) return; /* Clean up each thread now that everyone has gone native. Needs to be * done with the thread_initexit_lock held, which is true within a synched * region. */ for (i = 0; i < num_threads; i++) { if (i != my_idx && !IS_CLIENT_THREAD(threads[i]->dcontext)) { LOG(GLOBAL, LOG_ALL, 1, "Detach: cleaning up thread "TIDFMT" %s\n", threads[i]->id, IF_WINDOWS_ELSE(cleanup_tpc[i] ? "and its TPC" : "", "")); dynamo_other_thread_exit(threads[i] _IF_WINDOWS(!cleanup_tpc[i])); } } if (my_idx != -1) { /* pre-client thread cleanup (PR 536058) */ dynamo_thread_exit_pre_client(my_dcontext, my_tr->id); } LOG(GLOBAL, LOG_ALL, 1, "Detach: Letting slave threads go native\n"); #ifdef WINDOWS global_heap_free(cleanup_tpc, num_threads*sizeof(bool) HEAPACCT(ACCT_OTHER)); /* XXX: there's a possible race if a thread waiting at APC is still there * when we unload our dll. */ os_thread_yield(); #endif end_synch_with_all_threads(threads, num_threads, false/*don't resume */); threads = NULL; LOG(GLOBAL, LOG_ALL, 1, "Detach: Entering final cleanup and unload\n"); SYSLOG_INTERNAL_INFO("Detaching from process, entering final cleanup"); DEBUG_DECLARE(exit_res =) dynamo_shared_exit(my_tr _IF_WINDOWS(detach_stacked_callbacks)); ASSERT(exit_res == SUCCESS); detach_finalize_cleanup(); stack_free(initstack, DYNAMORIO_STACK_SIZE); dynamo_exit_post_detach(); doing_detach = false; started_detach = false; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); dynamo_detaching_flag = LOCK_FREE_STATE; EXITING_DR(); }
1
12624
I am afraid this is going to cause problems on Windows, where it is not uncommon to have injected threads (CTRL_SHUTDOWN, CTRL_LOGOFF, etc.) that we have no privileges to suspend -- retrying will just fail again, and with the new "synchall failure is fatal and should kill the process" approach, what used to work for us on Windows turns into process death.
DynamoRIO-dynamorio
c
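The review comment above turns on the suspend-failure flags that synch_with_all_threads accepts (ABORT, IGNORE, RETRY): the code asserts exactly one is set, and RETRY is useless against threads the process lacks privileges to suspend, since every attempt fails the same way. Below is a minimal Go sketch of that policy logic, not DynamoRIO code; suspendPolicy, synchOne, and the other names are hypothetical stand-ins for the C flags and assertions quoted in the record.

package main

import "fmt"

type suspendPolicy uint

const (
	failureAbort suspendPolicy = 1 << iota
	failureIgnore
	failureRetry
)

// validPolicy mirrors the ASSERT in the C code: exactly one of
// ABORT/IGNORE/RETRY must be set.
func validPolicy(p suspendPolicy) bool {
	set := 0
	for _, f := range []suspendPolicy{failureAbort, failureIgnore, failureRetry} {
		if p&f != 0 {
			set++
		}
	}
	return set == 1
}

// synchOne models one thread; canSuspend=false stands in for an
// injected thread we have no privileges to suspend (the reviewer's case).
func synchOne(p suspendPolicy, canSuspend bool, maxRetries int) error {
	if !validPolicy(p) {
		return fmt.Errorf("exactly one suspend-failure flag must be set")
	}
	for attempt := 0; ; attempt++ {
		if canSuspend {
			return nil // suspended at a safe spot
		}
		switch {
		case p&failureIgnore != 0:
			return nil // treat as synched, leave the thread alone
		case p&failureAbort != 0:
			return fmt.Errorf("suspend failed, aborting synch")
		case attempt >= maxRetries:
			// RETRY against an unsuspendable thread just fails again;
			// without a cap this would spin forever.
			return fmt.Errorf("suspend still failing after %d retries", attempt)
		}
	}
}

func main() {
	fmt.Println(synchOne(failureIgnore, false, 3)) // <nil>: thread left alone
	fmt.Println(synchOne(failureRetry, false, 3))  // error: retrying cannot help
}

Under IGNORE the unsuspendable thread is simply skipped, which is what the reviewer says kept this working on Windows before synchall failure became fatal.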
@@ -45,6 +45,7 @@ program .option('-C <build_dir>', 'build config (out/Debug, out/Release') .option('--target_arch <target_arch>', 'target architecture', 'x64') .option('--mac_signing_identifier <id>', 'The identifier to use for signing') + .option('--mac_installer_signing_identifier <id>', 'The identifier to use for signing the installer') .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login') .option('--debug_build <debug_build>', 'keep debugging symbols') .option('--official_build <official_build>', 'force official build settings')
1
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ const program = require('commander'); const path = require('path') const fs = require('fs-extra') const config = require('../lib/config') const util = require('../lib/util') const build = require('../lib/build') const versions = require('../lib/versions') const start = require('../lib/start') const updatePatches = require('../lib/updatePatches') const pullL10n = require('../lib/pullL10n') const pushL10n = require('../lib/pushL10n') const chromiumRebaseL10n = require('../lib/chromiumRebaseL10n') const createDist = require('../lib/createDist') const upload = require('../lib/upload') const test = require('../lib/test') program .version(process.env.npm_package_version) program .command('versions') .action(versions) program .command('build') .option('-C <build_dir>', 'build config (out/Debug, out/Release') .option('--target_arch <target_arch>', 'target architecture', 'x64') .option('--mac_signing_identifier <id>', 'The identifier to use for signing') .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login') .option('--debug_build <debug_build>', 'keep debugging symbols') .option('--official_build <official_build>', 'force official build settings') .option('--brave_google_api_key <brave_google_api_key>') .option('--brave_google_api_endpoint <brave_google_api_endpoint>') .option('--channel <target_chanel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release') .option('--ignore_compile_failure', 'Keep compiling regardless of error') .arguments('[build_config]') .action(build) program .command('create_dist') .option('-C <build_dir>', 'build config (out/Debug, out/Release') .option('--target_arch <target_arch>', 'target architecture', 'x64') .option('--mac_signing_identifier <id>', 'The identifier to use for signing') .option('--mac_signing_keychain <keychain>', 'The identifier to use for signing', 'login') .option('--debug_build <debug_build>', 'keep debugging symbols') .option('--official_build <official_build>', 'force official build settings') .option('--brave_google_api_key <brave_google_api_key>') .option('--brave_google_api_endpoint <brave_google_api_endpoint>') .option('--channel <target_chanel>', 'target channel to build', /^(beta|dev|nightly|release)$/i, 'release') .arguments('[build_config]') .action(createDist) program .command('upload') .option('--target_arch <target_arch>', 'target architecture', 'x64') .action(upload) program .command('start') .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0') .option('--vmodule [modules]', 'verbose log from specific modules') .option('--user_data_dir_name [base_name]', 'set user data directory base name to [base_name]', 'Brave-Browser-Development') .option('--no_sandbox', 'disable the sandbox') .option('--disable_brave_extension', 'disable loading the Brave extension') .option('--disable_brave_rewards_extension', 'disable loading the Brave Rewards extension') .option('--disable_pdfjs_extension', 'disable loading the PDFJS extension') .option('--ui_mode <ui_mode>', 'which built-in ui appearance mode to use', /^(dark|light)$/i) .option('--show_component_extensions', 'show component extensions in chrome://extensions') .option('--enable_brave_update', 'enable brave update') .option('--channel <target_chanel>', 'target channel to start', /^(beta|dev|nightly|release)$/i, 'release') 
.option('--official_build <official_build>', 'force official build settings') .option('--single_process', 'use a single process') .arguments('[build_config]') .action(start) program .command('pull_l10n') .action(pullL10n) program .command('push_l10n') .action(pushL10n) program .command('chromium_rebase_l10n') .action(chromiumRebaseL10n) program .command('update_patches') .action(updatePatches) program .command('cibuild') .option('--target_arch <target_arch>', 'target architecture', 'x64') .action((options) => { options.official_build = true build('Release', options) }) program .command('test <suite>') .option('--v [log_level]', 'set log level to [log_level]', parseInt, '0') .option('--filter <filter>', 'set test filter') .option('--output <output>', 'set test output (results) file path') .option('--disable_brave_extension', 'disable loading the Brave extension') .option('--single_process', 'uses a single process to run tests to help with debugging') .option('--test_launcher_jobs <test_launcher_jobs>', 'Number of jobs to launch') .arguments('[build_config]') .action(test) program .parse(process.argv)
1
5472
I think it should only be in create_dist.
brave-brave-browser
js
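The reviewer's point is that the new --mac_installer_signing_identifier option was added to the build command's option list but, presumably because installer signing happens at distribution time, should live on create_dist alone. Below is a hedged sketch of that pattern using Go's spf13/cobra rather than the commander.js API the script actually uses; the command names are illustrative only. A flag registered locally on one subcommand is rejected by its siblings, which enforces the reviewer's suggestion mechanically.

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "browser-tool"}

	buildCmd := &cobra.Command{
		Use: "build",
		Run: func(cmd *cobra.Command, args []string) { fmt.Println("building") },
	}

	createDistCmd := &cobra.Command{
		Use: "create_dist",
		Run: func(cmd *cobra.Command, args []string) {
			id, _ := cmd.Flags().GetString("mac_installer_signing_identifier")
			fmt.Println("packaging, signing installer with:", id)
		},
	}
	// Local flag: only create_dist accepts it; `build` will report
	// an unknown flag if it is passed there.
	createDistCmd.Flags().String("mac_installer_signing_identifier", "",
		"identifier used to sign the installer")

	root.AddCommand(buildCmd, createDistCmd)
	_ = root.Execute()
}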
@@ -429,9 +429,14 @@ func (c *NPLController) handleAddUpdatePod(key string, obj interface{}) error { podPorts[port] = struct{}{} portData := c.portTable.GetEntryByPodIPPort(podIP, int(cport.ContainerPort)) if portData == nil { // rule does not exist - nodePort, err = c.portTable.AddRule(podIP, port) - if err != nil { - return fmt.Errorf("failed to add rule for Pod %s: %v", key, err) + if int(cport.HostPort) > 0 { + klog.V(4).Infof("Host Port is defined for Container %s in Pod %s, thus extra NPL port is not allocated", container.Name, key) + nodePort = int(cport.HostPort) + } else { + nodePort, err = c.portTable.AddRule(podIP, port) + if err != nil { + return fmt.Errorf("failed to add rule for Pod %s: %v", key, err) + } } } else { nodePort = portData.NodePort
1
// +build !windows // Copyright 2020 Antrea Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package k8s import ( "encoding/json" "fmt" "reflect" "sync" "time" "github.com/vmware-tanzu/antrea/pkg/agent/nodeportlocal/portcache" "github.com/vmware-tanzu/antrea/pkg/agent/nodeportlocal/rules" utilsets "github.com/vmware-tanzu/antrea/pkg/util/sets" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" "k8s.io/klog" ) const ( controllerName = "AntreaAgentNPLController" minRetryDelay = 2 * time.Second maxRetryDelay = 120 * time.Second numWorkers = 4 ) type NPLController struct { portTable *portcache.PortTable kubeClient clientset.Interface queue workqueue.RateLimitingInterface podInformer cache.SharedIndexInformer podLister corelisters.PodLister svcInformer cache.SharedIndexInformer podToIP map[string]string nodeName string podIPLock sync.RWMutex } func NewNPLController(kubeClient clientset.Interface, podInformer cache.SharedIndexInformer, svcInformer cache.SharedIndexInformer, resyncPeriod time.Duration, pt *portcache.PortTable, nodeName string) *NPLController { c := NPLController{ kubeClient: kubeClient, portTable: pt, podInformer: podInformer, podLister: corelisters.NewPodLister(podInformer.GetIndexer()), svcInformer: svcInformer, podToIP: make(map[string]string), nodeName: nodeName, } podInformer.AddEventHandlerWithResyncPeriod( cache.ResourceEventHandlerFuncs{ AddFunc: c.enqueuePod, DeleteFunc: c.enqueuePod, UpdateFunc: func(old, cur interface{}) { c.enqueuePod(cur) }, }, resyncPeriod, ) svcInformer.AddEventHandlerWithResyncPeriod( cache.ResourceEventHandlerFuncs{ AddFunc: c.enqueueSvc, DeleteFunc: c.enqueueSvc, UpdateFunc: c.enqueueSvcUpdate, }, resyncPeriod, ) svcInformer.AddIndexers( cache.Indexers{ NPLEnabledAnnotationIndex: func(obj interface{}) ([]string, error) { svc, ok := obj.(*corev1.Service) if !ok { return []string{}, nil } if val, ok := svc.GetAnnotations()[NPLEnabledAnnotationKey]; ok { return []string{val}, nil } return []string{}, nil }, }, ) c.queue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "nodeportlocal") return &c } func podKeyFunc(pod *corev1.Pod) string { return pod.Namespace + "/" + pod.Name } // Run starts to watch and process Pod updates for the Node where Antrea Agent is running. // It starts a queue and a fixed number of workers to process the objects from the queue. 
func (c *NPLController) Run(stopCh <-chan struct{}) { defer func() { klog.Infof("Shutting down %s", controllerName) c.queue.ShutDown() }() klog.Infof("Starting %s", controllerName) go c.podInformer.Run(stopCh) if !cache.WaitForNamedCacheSync(controllerName, stopCh, c.podInformer.HasSynced, c.svcInformer.HasSynced) { return } klog.Info("Will fetch Pods and generate NodePortLocal rules for these Pods") if err := c.GetPodsAndGenRules(); err != nil { klog.Errorf("Error in getting Pods and generating rules: %v", err) return } for i := 0; i < numWorkers; i++ { go wait.Until(c.Worker, time.Second, stopCh) } <-stopCh } func (c *NPLController) syncPod(key string) error { obj, exists, err := c.podInformer.GetIndexer().GetByKey(key) if err != nil { return err } else if exists { return c.handleAddUpdatePod(key, obj) } else { return c.handleRemovePod(key) } } func (c *NPLController) checkDeletedPod(obj interface{}) (*corev1.Pod, error) { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { return nil, fmt.Errorf("received unexpected object: %v", obj) } pod, ok := deletedState.Obj.(*corev1.Pod) if !ok { return nil, fmt.Errorf("DeletedFinalStateUnknown object is not of type Pod: %v", deletedState.Obj) } return pod, nil } func (c *NPLController) enqueuePod(obj interface{}) { pod, isPod := obj.(*corev1.Pod) if !isPod { var err error pod, err = c.checkDeletedPod(obj) if err != nil { klog.Errorf("Got error while processing event update: %v", err) return } } podKey := podKeyFunc(pod) c.queue.Add(podKey) } func (c *NPLController) checkDeletedSvc(obj interface{}) (*corev1.Service, error) { deletedState, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { return nil, fmt.Errorf("received unexpected object: %v", obj) } svc, ok := deletedState.Obj.(*corev1.Service) if !ok { return nil, fmt.Errorf("DeletedFinalStateUnknown object is not of type Service: %v", deletedState.Obj) } return svc, nil } func (c *NPLController) enqueueSvcUpdate(oldObj, newObj interface{}) { // In case where the app selector in Service gets updated from one valid selector to another // both sets of Pods (corresponding to old and new selector) need to be considered. newSvc := newObj.(*corev1.Service) oldSvc := oldObj.(*corev1.Service) oldSvcAnnotation := oldSvc.Annotations[NPLEnabledAnnotationKey] newSvcAnnotation := newSvc.Annotations[NPLEnabledAnnotationKey] // Return if both Services do not have the NPL annotation. if oldSvcAnnotation != "true" && newSvcAnnotation != "true" { return } if newSvcAnnotation == "true" && newSvc.Spec.Type == corev1.ServiceTypeNodePort { klog.Warningf("Service %s is of type NodePort and cannot be used for NodePortLocal, the '%s' annotation will have no effect", newSvc.Name, NPLEnabledAnnotationKey) } podKeys := sets.String{} oldNPLEnabled := oldSvcAnnotation == "true" && oldSvc.Spec.Type != corev1.ServiceTypeNodePort newNPLEnabled := newSvcAnnotation == "true" && newSvc.Spec.Type != corev1.ServiceTypeNodePort if oldNPLEnabled != newNPLEnabled { // Process Pods corresponding to Service with valid NPL annotation and Service type. if oldNPLEnabled { podKeys = sets.NewString(c.getPodsFromService(oldSvc)...) } else if newNPLEnabled { podKeys = sets.NewString(c.getPodsFromService(newSvc)...) } } else if oldNPLEnabled && newNPLEnabled && !reflect.DeepEqual(oldSvc.Spec.Selector, newSvc.Spec.Selector) { // Disjunctive union of Pods from both Service sets. oldPodSet := sets.NewString(c.getPodsFromService(oldSvc)...) newPodSet := sets.NewString(c.getPodsFromService(newSvc)...) 
podKeys = utilsets.SymmetricDifference(oldPodSet, newPodSet) } for podKey := range podKeys { c.queue.Add(podKey) } } func (c *NPLController) enqueueSvc(obj interface{}) { svc, isSvc := obj.(*corev1.Service) if !isSvc { var err error svc, err = c.checkDeletedSvc(obj) if err != nil { klog.Errorf("Got error while processing event update: %v", err) return } } // Process Pods corresponding to Service with valid NPL annotation. if svc.Annotations[NPLEnabledAnnotationKey] == "true" { for _, podKey := range c.getPodsFromService(svc) { c.queue.Add(podKey) } } } func (c *NPLController) getPodsFromService(svc *corev1.Service) []string { var pods []string // Handling Service without selectors. if len(svc.Spec.Selector) == 0 { return pods } podList, err := c.podLister.Pods(svc.Namespace).List(labels.SelectorFromSet(labels.Set(svc.Spec.Selector))) if err != nil { klog.Errorf("Got error while listing Pods: %v", err) return pods } for _, pod := range podList { pods = append(pods, podKeyFunc(pod)) } return pods } func (c *NPLController) isNPLEnabledForServiceOfPod(obj interface{}) bool { pod := obj.(*corev1.Pod) services, err := c.svcInformer.GetIndexer().ByIndex(NPLEnabledAnnotationIndex, "true") if err != nil { klog.Errorf("Got error while listing Services with annotation %s: %v", NPLEnabledAnnotationKey, err) return false } for _, service := range services { svc, isSvc := service.(*corev1.Service) // Selecting Services NOT of type NodePort, with Service selector matching Pod labels. if isSvc && svc.Spec.Type != corev1.ServiceTypeNodePort { if pod.Namespace == svc.Namespace && matchSvcSelectorPodLabels(svc.Spec.Selector, pod.GetLabels()) { return true } } } return false } // matchSvcSelectorPodLabels verifies that all key/value pairs present in Service's selector // are also present in Pod's labels. func matchSvcSelectorPodLabels(svcSelector, podLabel map[string]string) bool { // Handling Service without selectors. if len(svcSelector) == 0 { return false } for selectorKey, selectorVal := range svcSelector { if labelVal, ok := podLabel[selectorKey]; !ok || selectorVal != labelVal { return false } } return true } func (c *NPLController) Worker() { for c.processNextWorkItem() { } } func (c *NPLController) processNextWorkItem() bool { obj, quit := c.queue.Get() if quit { return false } defer c.queue.Done(obj) if key, ok := obj.(string); !ok { c.queue.Forget(obj) klog.Errorf("Expected string in work queue but got %#v", obj) return true } else if err := c.syncPod(key); err == nil { klog.V(2).Infof("Successfully processed key: %s, in queue", key) c.queue.Forget(key) } else { c.queue.AddRateLimited(key) klog.Errorf("Error syncing Pod %s, requeuing. Error: %v", key, err) } return true } func (c *NPLController) getPodIPFromCache(key string) (string, bool) { c.podIPLock.RLock() defer c.podIPLock.RUnlock() podIP, found := c.podToIP[key] return podIP, found } func (c *NPLController) addPodIPToCache(key, podIP string) { c.podIPLock.Lock() defer c.podIPLock.Unlock() c.podToIP[key] = podIP } func (c *NPLController) deletePodIPFromCache(key string) { c.podIPLock.Lock() defer c.podIPLock.Unlock() delete(c.podToIP, key) } func (c *NPLController) deleteAllPortRulesIfAny(podIP string) error { data := c.portTable.GetDataForPodIP(podIP) for _, d := range data { err := c.portTable.DeleteRule(d.PodIP, int(d.PodPort)) if err != nil { return err } } return nil } // handleRemovePod removes rules from port table and // rules programmed in the system based on implementation type (e.g. IPTABLES). 
// This also removes Pod annotation from Pods that are not selected by Service annotation. func (c *NPLController) handleRemovePod(key string) error { klog.V(2).Infof("Got delete event for Pod: %s", key) podIP, found := c.getPodIPFromCache(key) if !found { klog.Infof("IP address not found for Pod: %s", key) return nil } if err := c.deleteAllPortRulesIfAny(podIP); err != nil { return err } c.deletePodIPFromCache(key) return nil } // handleAddUpdatePod handles Pod Add, Update events and updates annotation if required. func (c *NPLController) handleAddUpdatePod(key string, obj interface{}) error { pod := obj.(*corev1.Pod) klog.V(2).Infof("Got add/update event for Pod: %s", key) podIP := pod.Status.PodIP if podIP == "" { klog.Infof("IP address not set for Pod: %s", key) return nil } c.addPodIPToCache(key, podIP) if !c.isNPLEnabledForServiceOfPod(obj) { if err := c.deleteAllPortRulesIfAny(podIP); err != nil { return err } if _, exists := pod.Annotations[NPLAnnotationKey]; exists { return c.cleanupNPLAnnotationForPod(pod) } return nil } klog.V(2).Infof("Pod %s is selected by a Service for which NodePortLocal is enabled", key) var err error var nodePort int podPorts := make(map[int]struct{}) podContainers := pod.Spec.Containers nplAnnotations := []NPLAnnotation{} podAnnotation, nplExists := pod.GetAnnotations()[NPLAnnotationKey] if nplExists { if err := json.Unmarshal([]byte(podAnnotation), &nplAnnotations); err != nil { klog.Warningf("Unable to unmarshal NodePortLocal annotation for Pod %s", key) return nil } } nplAnnotationsRequired := []NPLAnnotation{} // first, check which rules are needed based on the Pod specification (ignoring NPL // annotations) and make sure they are present. As we do so, we build the expected list of // NPL annotations for the Pod. for _, container := range podContainers { for _, cport := range container.Ports { port := int(cport.ContainerPort) podPorts[port] = struct{}{} portData := c.portTable.GetEntryByPodIPPort(podIP, int(cport.ContainerPort)) if portData == nil { // rule does not exist nodePort, err = c.portTable.AddRule(podIP, port) if err != nil { return fmt.Errorf("failed to add rule for Pod %s: %v", key, err) } } else { nodePort = portData.NodePort } nplAnnotationsRequired = append(nplAnnotationsRequired, NPLAnnotation{ PodPort: port, NodeIP: pod.Status.HostIP, NodePort: nodePort, }) } } // second, delete any existing rule that is not needed based on the current Pod // specification. entries := c.portTable.GetDataForPodIP(podIP) if nplExists { for _, data := range entries { if _, exists := podPorts[data.PodPort]; !exists { err := c.portTable.DeleteRule(podIP, int(data.PodPort)) if err != nil { return fmt.Errorf("failed to delete rule for Pod IP %s, Pod Port %d: %v", podIP, data.PodPort, err) } } } } // finally, we can check if the current annotation matches the expected one (which we built // in the first step). If not, the Pod needed to be patched. updatePodAnnotation := !compareNPLAnnotationLists(nplAnnotations, nplAnnotationsRequired) if updatePodAnnotation { return c.updatePodNPLAnnotation(pod, nplAnnotationsRequired) } return nil } // GetPodsAndGenRules fetches all the Pods on this Node and looks for valid NodePortLocal // annotations. If they exist, with a valid Node port, it adds the Node port to the port table and // rules. If the NodePortLocal annotation is invalid (cannot be unmarshalled), the annotation is // cleared. 
If the Node port is invalid (maybe the port range was changed and the Agent was // restarted), the annotation is ignored and will be removed by the Pod event handlers. The Pod // event handlers will also take care of allocating a new Node port if required. func (c *NPLController) GetPodsAndGenRules() error { podList, err := c.podLister.List(labels.Everything()) if err != nil { return fmt.Errorf("error in fetching the Pods for Node %s: %v", c.nodeName, err) } allNPLPorts := []rules.PodNodePort{} for i := range podList { // For each Pod: // check if a valid NodePortLocal annotation exists for this Pod: // if yes, verifiy validity of the Node port, update the port table and add a rule to the // rules buffer. pod := podList[i] annotations := pod.GetAnnotations() nplAnnotation, ok := annotations[NPLAnnotationKey] if !ok { continue } nplData := []NPLAnnotation{} err := json.Unmarshal([]byte(nplAnnotation), &nplData) if err != nil { // if there's an error in this NodePortLocal annotation, clean it up err := c.cleanupNPLAnnotationForPod(pod) if err != nil { return err } continue } for _, npl := range nplData { if npl.NodePort > c.portTable.EndPort || npl.NodePort < c.portTable.StartPort { // ignoring annotation for now, it will be removed by the first call // to handleAddUpdatePod klog.V(2).Infof("Found invalid NodePortLocal annotation for Pod %s/%s: %s, ignoring it", pod.Namespace, pod.Name, nplAnnotation) continue } else { allNPLPorts = append(allNPLPorts, rules.PodNodePort{ NodePort: npl.NodePort, PodPort: npl.PodPort, PodIP: pod.Status.PodIP, }) } } } if err := c.addRulesForNPLPorts(allNPLPorts); err != nil { return err } return nil } func (c *NPLController) addRulesForNPLPorts(allNPLPorts []rules.PodNodePort) error { for _, nplPort := range allNPLPorts { c.portTable.AddUpdateEntry(nplPort.NodePort, nplPort.PodPort, nplPort.PodIP) } if err := c.portTable.PodPortRules.AddAllRules(allNPLPorts); err != nil { return err } return nil } // cleanupNPLAnnotationForPod removes the NodePortLocal annotation from the Pod's annotations map entirely. func (c *NPLController) cleanupNPLAnnotationForPod(pod *corev1.Pod) error { _, ok := pod.Annotations[NPLAnnotationKey] if !ok { return nil } return patchPod(nil, pod, c.kubeClient) }
1
35,093
Do you plan to support Pod spec changes? For example, a hostPort being added or removed after Pod creation?
antrea-io-antrea
go
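
The review question above is largely answered by handleAddUpdatePod in the file itself: it diffs the container ports declared in the Pod spec against the rules currently in the port table, adding missing rules and deleting stale ones. A minimal, runnable Go sketch of that diff-and-reconcile pattern — PortTable and its fields here are simplified stand-ins, not the actual antrea types:

package main

import "fmt"

// PortTable is a simplified stand-in for the controller's port table.
type PortTable struct {
	rules map[int]bool // pod port -> rule installed
}

// Reconcile installs a rule for every port in the current Pod spec and
// removes rules for ports that are no longer declared, which is how an
// added or removed port would be picked up on a Pod update event.
func (t *PortTable) Reconcile(desiredPorts []int) {
	desired := make(map[int]bool)
	for _, p := range desiredPorts {
		desired[p] = true
		if !t.rules[p] {
			t.rules[p] = true // AddRule in the real controller
			fmt.Printf("added rule for port %d\n", p)
		}
	}
	for p := range t.rules {
		if !desired[p] {
			delete(t.rules, p) // DeleteRule in the real controller
			fmt.Printf("deleted stale rule for port %d\n", p)
		}
	}
}

func main() {
	t := &PortTable{rules: map[int]bool{8080: true}}
	t.Reconcile([]int{8080, 9090}) // a port was added to the Pod spec
	t.Reconcile([]int{9090})       // a port was removed from the Pod spec
}
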
@@ -157,13 +157,14 @@ func (tlf *TLF) GetFileInformation(ctx context.Context, fi *dokan.FileInfo) (st } // open tries to open a file. -func (tlf *TLF) open(ctx context.Context, oc *openContext, path []string) (dokan.File, bool, error) { +func (tlf *TLF) open(ctx context.Context, oc *openContext, path []string) ( + dokan.File, dokan.CreateStatus, error) { if len(path) == 0 { if err := oc.ReturningDirAllowed(); err != nil { - return nil, true, err + return nil, 0, err } tlf.refcount.Increase() - return tlf, true, nil + return tlf, dokan.ExistingDir, nil } mode := libkbfs.ReadMode
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libdokan import ( "sync" "time" "github.com/keybase/kbfs/dokan" "github.com/keybase/kbfs/libfs" "github.com/keybase/kbfs/libkbfs" "github.com/keybase/kbfs/tlf" "golang.org/x/net/context" ) // TLF represents the root directory of a TLF. It wraps a lazy-loaded // Dir. type TLF struct { refcount refcount folder *Folder dirLock sync.RWMutex dir *Dir emptyFile } func newTLF(fl *FolderList, h *libkbfs.TlfHandle, name tlf.PreferredName) *TLF { folder := newFolder(fl, h, name) tlf := &TLF{ folder: folder, } tlf.refcount.Increase() return tlf } func (tlf *TLF) getStoredDir() *Dir { tlf.dirLock.RLock() defer tlf.dirLock.RUnlock() return tlf.dir } func (tlf *TLF) loadDirHelper(ctx context.Context, info string, mode libkbfs.ErrorModeType, filterErr bool) ( dir *Dir, exitEarly bool, err error) { dir = tlf.getStoredDir() if dir != nil { return dir, false, nil } tlf.dirLock.Lock() defer tlf.dirLock.Unlock() // Need to check for nilness again to avoid racing with other // calls to loadDir(). if tlf.dir != nil { return tlf.dir, false, nil } name := tlf.folder.name() tlf.folder.fs.log.CDebugf(ctx, "Loading root directory for folder %s "+ "(type: %s, filter error: %t) for %s", name, tlf.folder.list.tlfType, filterErr, info) defer func() { if filterErr { exitEarly, err = libfs.FilterTLFEarlyExitError(ctx, err, tlf.folder.fs.log, name) } tlf.folder.reportErr(ctx, mode, err) }() handle, err := tlf.folder.resolve(ctx) if err != nil { return nil, false, err } var rootNode libkbfs.Node if filterErr { rootNode, _, err = tlf.folder.fs.config.KBFSOps().GetRootNode( ctx, handle, libkbfs.MasterBranch) if err != nil { return nil, false, err } // If not fake an empty directory. if rootNode == nil { return nil, false, libfs.TlfDoesNotExist{} } } else { rootNode, _, err = tlf.folder.fs.config.KBFSOps().GetOrCreateRootNode( ctx, handle, libkbfs.MasterBranch) if err != nil { return nil, false, err } } err = tlf.folder.setFolderBranch(rootNode.GetFolderBranch()) if err != nil { return nil, false, err } tlf.folder.nodes[rootNode.GetID()] = tlf tlf.dir = newDir(tlf.folder, rootNode, string(name), nil) // TLFs should be cached. tlf.dir.refcount.Increase() tlf.folder.lockedAddNode(rootNode, tlf.dir) return tlf.dir, false, nil } func (tlf *TLF) loadDir(ctx context.Context, info string) (*Dir, error) { dir, _, err := tlf.loadDirHelper(ctx, info, libkbfs.WriteMode, false) return dir, err } // loadDirAllowNonexistent loads a TLF if it's not already loaded. If // the TLF doesn't yet exist, it still returns a nil error and // indicates that the calling function should pretend it's an empty // folder. func (tlf *TLF) loadDirAllowNonexistent(ctx context.Context, info string) ( *Dir, bool, error) { return tlf.loadDirHelper(ctx, info, libkbfs.ReadMode, true) } // SetFileTime sets mtime for FSOs (File and Dir). func (tlf *TLF) SetFileTime(ctx context.Context, fi *dokan.FileInfo, creation time.Time, lastAccess time.Time, lastWrite time.Time) (err error) { tlf.folder.fs.logEnter(ctx, "TLF SetFileTime") dir, err := tlf.loadDir(ctx, "TLF SetFileTime") if err != nil { return err } return dir.SetFileTime(ctx, fi, creation, lastAccess, lastWrite) } // SetFileAttributes for Dokan. 
func (tlf *TLF) SetFileAttributes(ctx context.Context, fi *dokan.FileInfo, fileAttributes dokan.FileAttribute) error { tlf.folder.fs.logEnter(ctx, "TLF SetFileAttributes") dir, err := tlf.loadDir(ctx, "TLF SetFileAttributes") if err != nil { return err } return dir.SetFileAttributes(ctx, fi, fileAttributes) } // GetFileInformation for dokan. func (tlf *TLF) GetFileInformation(ctx context.Context, fi *dokan.FileInfo) (st *dokan.Stat, err error) { dir := tlf.getStoredDir() if dir == nil { return defaultDirectoryInformation() } return dir.GetFileInformation(ctx, fi) } // open tries to open a file. func (tlf *TLF) open(ctx context.Context, oc *openContext, path []string) (dokan.File, bool, error) { if len(path) == 0 { if err := oc.ReturningDirAllowed(); err != nil { return nil, true, err } tlf.refcount.Increase() return tlf, true, nil } mode := libkbfs.ReadMode if oc.isCreation() { mode = libkbfs.WriteMode } // If it is a creation then we need the dir for real. dir, exitEarly, err := tlf.loadDirHelper(ctx, "open", mode, !oc.isCreation()) if err != nil { return nil, false, err } if exitEarly { specialNode := handleTLFSpecialFile(lastStr(path), tlf.folder) if specialNode != nil { return specialNode, false, nil } return nil, false, dokan.ErrObjectNameNotFound } return dir.open(ctx, oc, path) } // FindFiles does readdir for dokan. func (tlf *TLF) FindFiles(ctx context.Context, fi *dokan.FileInfo, pattern string, callback func(*dokan.NamedStat) error) (err error) { tlf.folder.fs.logEnter(ctx, "TLF FindFiles") dir, exitEarly, err := tlf.loadDirAllowNonexistent(ctx, "FindFiles") if err != nil { return errToDokan(err) } if exitEarly { return dokan.ErrObjectNameNotFound } return dir.FindFiles(ctx, fi, pattern, callback) } // CanDeleteDirectory - return just nil because tlfs // can always be removed from favorites. func (tlf *TLF) CanDeleteDirectory(ctx context.Context, fi *dokan.FileInfo) (err error) { return nil } // Cleanup - forget references, perform deletions etc. func (tlf *TLF) Cleanup(ctx context.Context, fi *dokan.FileInfo) { var err error if fi != nil && fi.IsDeleteOnClose() { tlf.folder.handleMu.Lock() fav := tlf.folder.h.ToFavorite() tlf.folder.handleMu.Unlock() tlf.folder.fs.log.CDebugf(ctx, "TLF Removing favorite %q", fav.Name) defer func() { tlf.folder.reportErr(ctx, libkbfs.WriteMode, err) }() err = tlf.folder.fs.config.KBFSOps().DeleteFavorite(ctx, fav) } if tlf.refcount.Decrease() { dir := tlf.getStoredDir() if dir == nil { return } dir.Cleanup(ctx, fi) } }
1
19,468
Is this behavior correct? It used to return `true`, which should map to `dokan.ExistingDir`. Was that previously a bug?
keybase-kbfs
go
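
The kbfs patch above widens open's boolean result into dokan.CreateStatus; since the old code returned true for the root-directory case, dokan.ExistingDir is the value that preserves behavior. A small runnable Go sketch of why a named enum is safer than a bare bool for this kind of multi-state result — CreateStatus and its constants below are illustrative stand-ins, not the real dokan package:

package main

import "fmt"

// CreateStatus is an illustrative stand-in for dokan.CreateStatus.
type CreateStatus int

const (
	NewFile CreateStatus = iota
	NewDir
	ExistingFile
	ExistingDir
)

func (s CreateStatus) String() string {
	return [...]string{"NewFile", "NewDir", "ExistingFile", "ExistingDir"}[s]
}

// openRoot mirrors the shape of TLF.open: with an enum, the "existing
// directory" case is named instead of being an opaque true.
func openRoot(pathLen int) (CreateStatus, error) {
	if pathLen == 0 {
		return ExistingDir, nil // was "return ..., true, nil" under the old signature
	}
	return ExistingFile, nil
}

func main() {
	s, _ := openRoot(0)
	fmt.Println(s) // ExistingDir
}
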
@@ -27,7 +27,11 @@ void deleteRegion(const storage::SharedRegionRegister::ShmKey key) void listRegions() { - + if (!storage::SharedMonitor<storage::SharedRegionRegister>::exists()) + { + osrm::util::Log() << "No shared memory regions found. Try running osrm-datastore"; + return; + } storage::SharedMonitor<storage::SharedRegionRegister> monitor; std::vector<std::string> names; const auto &shared_register = monitor.data();
1
#include "storage/shared_memory.hpp" #include "storage/shared_monitor.hpp" #include "storage/storage.hpp" #include "osrm/exception.hpp" #include "util/log.hpp" #include "util/meminfo.hpp" #include "util/typedefs.hpp" #include "util/version.hpp" #include <boost/filesystem.hpp> #include <boost/program_options.hpp> #include <csignal> #include <cstdlib> using namespace osrm; void removeLocks() { storage::SharedMonitor<storage::SharedRegionRegister>::remove(); } void deleteRegion(const storage::SharedRegionRegister::ShmKey key) { if (storage::SharedMemory::RegionExists(key) && !storage::SharedMemory::Remove(key)) { util::Log(logWARNING) << "could not delete shared memory region " << static_cast<int>(key); } } void listRegions() { storage::SharedMonitor<storage::SharedRegionRegister> monitor; std::vector<std::string> names; const auto &shared_register = monitor.data(); shared_register.List(std::back_inserter(names)); osrm::util::Log() << "name\tshm key\ttimestamp\tsize"; for (const auto &name : names) { auto id = shared_register.Find(name); auto region = shared_register.GetRegion(id); auto shm = osrm::storage::makeSharedMemory(region.shm_key); osrm::util::Log() << name << "\t" << static_cast<int>(region.shm_key) << "\t" << region.timestamp << "\t" << shm->Size(); } } void springClean() { osrm::util::Log() << "Releasing all locks"; osrm::util::Log() << "ATTENTION! BE CAREFUL!"; osrm::util::Log() << "----------------------"; osrm::util::Log() << "This tool may put osrm-routed into an undefined state!"; osrm::util::Log() << "Type 'Y' to acknowledge that you know what your are doing."; osrm::util::Log() << "\n\nDo you want to purge all shared memory allocated " << "by osrm-datastore? [type 'Y' to confirm]"; const auto letter = getchar(); if (letter != 'Y') { osrm::util::Log() << "aborted."; } else { for (auto key : util::irange<std::uint8_t>(0, storage::SharedRegionRegister::MAX_SHM_KEYS)) { deleteRegion(key); } removeLocks(); } } // generate boost::program_options object for the routing part bool generateDataStoreOptions(const int argc, const char *argv[], std::string &verbosity, boost::filesystem::path &base_path, int &max_wait, std::string &dataset_name, bool &list_datasets, bool &only_metric) { // declare a group of options that will be allowed only on command line boost::program_options::options_description generic_options("Options"); generic_options.add_options() // ("version,v", "Show version") // ("help,h", "Show this help message") // ("verbosity,l", boost::program_options::value<std::string>(&verbosity)->default_value("INFO"), std::string("Log verbosity level: " + util::LogPolicy::GetLevels()).c_str()) // ("remove-locks,r", "Remove locks") // ("spring-clean,s", "Spring-cleaning all shared memory regions"); // declare a group of options that will be allowed both on command line // as well as in a config file boost::program_options::options_description config_options("Configuration"); config_options.add_options() // ("max-wait", boost::program_options::value<int>(&max_wait)->default_value(-1), "Maximum number of seconds to wait on a running data update " "before aquiring the lock by force.") // ("dataset-name", boost::program_options::value<std::string>(&dataset_name)->default_value(""), "Name of the dataset to load into memory. This allows having multiple datasets in memory " "at the same time.") // ("list", boost::program_options::value<bool>(&list_datasets) ->default_value(false) ->implicit_value(true), "Name of the dataset to load into memory. 
This allows having multiple datasets in memory " "at the same time.") // ("only-metric", boost::program_options::value<bool>(&only_metric) ->default_value(false) ->implicit_value(true), "Only reload the metric data without updating the full dataset. This is an optimization " "for traffic updates."); // hidden options, will be allowed on command line but will not be shown to the user boost::program_options::options_description hidden_options("Hidden options"); hidden_options.add_options()("base,b", boost::program_options::value<boost::filesystem::path>(&base_path), "base path to .osrm file"); // positional option boost::program_options::positional_options_description positional_options; positional_options.add("base", 1); // combine above options for parsing boost::program_options::options_description cmdline_options; cmdline_options.add(generic_options).add(config_options).add(hidden_options); const auto *executable = argv[0]; boost::program_options::options_description visible_options( boost::filesystem::path(executable).filename().string() + " [<options>] <configuration>"); visible_options.add(generic_options).add(config_options); // print help options if no infile is specified if (argc < 2) { util::Log() << visible_options; return false; } // parse command line options boost::program_options::variables_map option_variables; try { boost::program_options::store(boost::program_options::command_line_parser(argc, argv) .options(cmdline_options) .positional(positional_options) .run(), option_variables); } catch (const boost::program_options::error &e) { util::Log(logERROR) << e.what(); return false; } if (option_variables.count("version")) { util::Log() << OSRM_VERSION; return false; } if (option_variables.count("help")) { util::Log() << visible_options; return false; } if (option_variables.count("remove-locks")) { removeLocks(); return false; } if (option_variables.count("spring-clean")) { springClean(); return false; } boost::program_options::notify(option_variables); return true; } [[noreturn]] void CleanupSharedBarriers(int signum) { // Here the lock state of named mutexes is unknown, make a hard cleanup removeLocks(); std::_Exit(128 + signum); } int main(const int argc, const char *argv[]) try { int signals[] = {SIGTERM, SIGSEGV, SIGINT, SIGILL, SIGABRT, SIGFPE}; for (auto sig : signals) { std::signal(sig, CleanupSharedBarriers); } util::LogPolicy::GetInstance().Unmute(); std::string verbosity; boost::filesystem::path base_path; int max_wait = -1; std::string dataset_name; bool list_datasets = false; bool only_metric = false; if (!generateDataStoreOptions( argc, argv, verbosity, base_path, max_wait, dataset_name, list_datasets, only_metric)) { return EXIT_SUCCESS; } util::LogPolicy::GetInstance().SetLevel(verbosity); if (list_datasets) { listRegions(); return EXIT_SUCCESS; } storage::StorageConfig config(base_path); if (!config.IsValid()) { util::Log(logERROR) << "Config contains invalid file paths. Exiting!"; return EXIT_FAILURE; } storage::Storage storage(std::move(config)); return storage.Run(max_wait, dataset_name, only_metric); } catch (const osrm::RuntimeError &e) { util::Log(logERROR) << e.what(); return e.GetCode(); } catch (const std::bad_alloc &e) { util::DumpMemoryStats(); util::Log(logERROR) << "[exception] " << e.what(); util::Log(logERROR) << "Please provide more memory or disable locking the virtual " "address space (note: this makes OSRM swap, i.e. slow)"; return EXIT_FAILURE; }
1
23,691
What about just printing an empty list in this case? That would make the output more predictable.
Project-OSRM-osrm-backend
cpp
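
The osrm-backend patch above guards listRegions with an existence check and prints a hint, while the review comment proposes printing an empty table instead so the output shape stays constant. A runnable Go sketch contrasting the two behaviors — monitorExists and names are hypothetical stand-ins for the shared-memory monitor, not the OSRM API:

package main

import "fmt"

func listRegions(monitorExists bool, names []string, emptyTableStyle bool) {
	if !monitorExists {
		if emptyTableStyle {
			// Reviewer's suggestion: always emit the header so scripts
			// parsing the output see a predictable shape.
			fmt.Println("name\tshm key\ttimestamp\tsize")
		} else {
			// Behavior chosen in the patch: a human-readable hint.
			fmt.Println("No shared memory regions found. Try running osrm-datastore")
		}
		return
	}
	fmt.Println("name\tshm key\ttimestamp\tsize")
	for _, n := range names {
		fmt.Println(n)
	}
}

func main() {
	listRegions(false, nil, false) // patch behavior
	listRegions(false, nil, true)  // reviewer's suggestion
}
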
@@ -24,7 +24,6 @@ export default Component.extend({ init() { this._super(...arguments); - this.container = document.querySelector('.gh-editor-container')[0]; let mobiledoc = this.get('value') || BLANK_DOC; let userCards = this.get('cards') || [];
1
import Component from 'ember-component'; import {A as emberA} from 'ember-array/utils'; import run from 'ember-runloop'; import layout from '../templates/components/gh-koenig'; import Mobiledoc from 'mobiledoc-kit'; import {MOBILEDOC_VERSION} from 'mobiledoc-kit/renderers/mobiledoc'; import createCardFactory from '../lib/card-factory'; import defaultCommands from '../options/default-commands'; import editorCards from '../cards/index'; // import { VALID_MARKUP_SECTION_TAGNAMES } from 'mobiledoc-kit/models/markup-section'; //the block elements supported by mobile-doc export const BLANK_DOC = { version: MOBILEDOC_VERSION, atoms: [], markups: [], cards: [], sections: [] }; export default Component.extend({ layout, classNames: ['editor-holder'], emberCards: emberA([]), init() { this._super(...arguments); this.container = document.querySelector('.gh-editor-container')[0]; let mobiledoc = this.get('value') || BLANK_DOC; let userCards = this.get('cards') || []; if (typeof mobiledoc === 'string') { mobiledoc = JSON.parse(mobiledoc); } // if the doc is cached then the editor is loaded and we don't need to continue. if (this._cachedDoc && this._cachedDoc === mobiledoc) { return; } let createCard = createCardFactory.apply(this, {}); // need to pass the toolbar let options = { mobiledoc, // temp cards: createCard(editorCards.concat(userCards)), atoms: [{ name: 'soft-return', type: 'dom', render() { return document.createElement('br'); } }], spellcheck: true, autofocus: this.get('shouldFocusEditor'), placeholder: 'Click here to start ...' }; this.editor = new Mobiledoc.Editor(options); }, willRender() { if (this._rendered) { return; } let {editor} = this; editor.willRender(() => { // console.log(Ember.run.currentRunLoop); // if (!Ember.run.currentRunLoop) { // this._startedRunLoop = true; // Ember.run.begin(); // } }); editor.didRender(() => { this.sendAction('loaded', editor); // Ember.run.end(); }); editor.postDidChange(()=> { run.join(() => { // store a cache of the local doc so that we don't need to reinitialise it. this._cachedDoc = editor.serialize(MOBILEDOC_VERSION); this.sendAction('onChange', this._cachedDoc); if (this._cachedDoc !== BLANK_DOC && !this._firstChange) { this._firstChange = true; this.sendAction('onFirstChange', this._cachedDoc); } }); }); }, didRender() { if (this._rendered) { return; } let [editorDom] = this.$('.surface'); this.domContainer = editorDom.parentNode.parentNode.parentNode.parentNode; // nasty nasty nasty. this.editor.render(editorDom); this._rendered = true; window.editor = this.editor; defaultCommands(this.editor); // initialise the custom text handlers for MD, etc. // shouldFocusEditor is only true when transitioning from new to edit, otherwise it's false or undefined. // therefore, if it's true it's after the first lot of content is entered and we expect the caret to be at the // end of the document. if (this.get('shouldFocusEditor')) { let range = document.createRange(); range.selectNodeContents(this.editor.element); range.collapse(false); let sel = window.getSelection(); sel.removeAllRanges(); sel.addRange(range); this.editor._ensureFocus(); } this.editor.cursorDidChange(() => this.cursorMoved()); }, // drag and drop images onto the editor drop(event) { if (event.dataTransfer.files.length) { event.preventDefault(); for (let i = 0; i < event.dataTransfer.files.length; i++) { let file = [event.dataTransfer.files[i]]; this.editor.insertCard('image-card', {pos: 'top', file}); } } }, // makes sure the cursor is on screen. 
cursorMoved() { let scrollBuffer = 33; // the extra buffer to scroll. let range = window.getSelection().getRangeAt(0); // get the actual range within the DOM. let position = range.getBoundingClientRect(); let windowHeight = window.innerHeight; if (position.bottom > windowHeight) { this.domContainer.scrollTop += position.bottom - windowHeight + scrollBuffer; } else if (position.top < 0) { this.domContainer.scrollTop += position.top - scrollBuffer; } }, willDestroy() { this.editor.destroy(); } });
1
7,925
I looked and couldn't find any usage of `container` in any of the editor component files (js or hbs), so I assume this was used once and didn't get removed?
TryGhost-Admin
js
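
The removed line in the Ghost Admin patch above had a second problem besides being unused: document.querySelector returns a single Element (or null), not a list, so indexing it with [0] yields undefined (or throws if there is no match) — this.container could never have held anything useful. A tiny runnable Go analogue of the single-value-versus-collection distinction that the JS code tripped over:

package main

import "fmt"

type element struct{ class string }

// querySelector returns only the first match, like the DOM API.
func querySelector(class string) *element { return &element{class} }

// querySelectorAll returns every match; this is the variant you index.
func querySelectorAll(class string) []*element { return []*element{{class}} }

func main() {
	fmt.Println(querySelector(".gh-editor-container").class)
	fmt.Println(querySelectorAll(".gh-editor-container")[0].class)
}
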
@@ -256,11 +256,11 @@ function roots_request_filter($query_vars) { add_filter('request', 'roots_request_filter'); /** - * Tell WordPress to use searchform.php from the templates/ directory + * Tell WordPress to use searchform.php from the templates/ directory. Requires WordPress 3.6+ */ -function roots_get_search_form($argument) { - if ($argument === '') { - locate_template('/templates/searchform.php', true, false); - } +function roots_get_search_form($form) { + $form = ''; + locate_template('/templates/searchform.php', true, false); + return $form; } add_filter('get_search_form', 'roots_get_search_form');
1
<?php /** * Clean up wp_head() * * Remove unnecessary <link>'s * Remove inline CSS used by Recent Comments widget * Remove inline CSS used by posts with galleries * Remove self-closing tag and change ''s to "'s on rel_canonical() */ function roots_head_cleanup() { // Originally from http://wpengineer.com/1438/wordpress-header/ remove_action('wp_head', 'feed_links', 2); remove_action('wp_head', 'feed_links_extra', 3); remove_action('wp_head', 'rsd_link'); remove_action('wp_head', 'wlwmanifest_link'); remove_action('wp_head', 'adjacent_posts_rel_link_wp_head', 10, 0); remove_action('wp_head', 'wp_generator'); remove_action('wp_head', 'wp_shortlink_wp_head', 10, 0); global $wp_widget_factory; remove_action('wp_head', array($wp_widget_factory->widgets['WP_Widget_Recent_Comments'], 'recent_comments_style')); if (!class_exists('WPSEO_Frontend')) { remove_action('wp_head', 'rel_canonical'); add_action('wp_head', 'roots_rel_canonical'); } } function roots_rel_canonical() { global $wp_the_query; if (!is_singular()) { return; } if (!$id = $wp_the_query->get_queried_object_id()) { return; } $link = get_permalink($id); echo "\t<link rel=\"canonical\" href=\"$link\">\n"; } add_action('init', 'roots_head_cleanup'); /** * Remove the WordPress version from RSS feeds */ add_filter('the_generator', '__return_false'); /** * Clean up language_attributes() used in <html> tag * * Change lang="en-US" to lang="en" * Remove dir="ltr" */ function roots_language_attributes() { $attributes = array(); $output = ''; if (function_exists('is_rtl')) { if (is_rtl() == 'rtl') { $attributes[] = 'dir="rtl"'; } } $lang = get_bloginfo('language'); if ($lang && $lang !== 'en-US') { $attributes[] = "lang=\"$lang\""; } else { $attributes[] = 'lang="en"'; } $output = implode(' ', $attributes); $output = apply_filters('roots_language_attributes', $output); return $output; } add_filter('language_attributes', 'roots_language_attributes'); /** * Manage output of wp_title() */ function roots_wp_title($title) { if (is_feed()) { return $title; } $title .= get_bloginfo('name'); return $title; } add_filter('wp_title', 'roots_wp_title', 10); /** * Clean up output of stylesheet <link> tags */ function roots_clean_style_tag($input) { preg_match_all("!<link rel='stylesheet'\s?(id='[^']+')?\s+href='(.*)' type='text/css' media='(.*)' />!", $input, $matches); // Only display media if it is meaningful $media = $matches[3][0] !== '' && $matches[3][0] !== 'all' ? ' media="' . $matches[3][0] . '"' : ''; return '<link rel="stylesheet" href="' . $matches[2][0] . '"' . $media . '>' . "\n"; } add_filter('style_loader_tag', 'roots_clean_style_tag'); /** * Add and remove body_class() classes */ function roots_body_class($classes) { // Add post/page slug if (is_single() || is_page() && !is_front_page()) { $classes[] = basename(get_permalink()); } // Remove unnecessary classes $home_id_class = 'page-id-' . get_option('page_on_front'); $remove_classes = array( 'page-template-default', $home_id_class ); $classes = array_diff($classes, $remove_classes); return $classes; } add_filter('body_class', 'roots_body_class'); /** * Wrap embedded media as suggested by Readability * * @link https://gist.github.com/965956 * @link http://www.readability.com/publishers/guidelines#publisher */ function roots_embed_wrap($cache, $url, $attr = '', $post_ID = '') { return '<div class="entry-content-asset">' . $cache . 
'</div>'; } add_filter('embed_oembed_html', 'roots_embed_wrap', 10, 4); /** * Add Bootstrap thumbnail styling to images with captions * Use <figure> and <figcaption> * * @link http://justintadlock.com/archives/2011/07/01/captions-in-wordpress */ function roots_caption($output, $attr, $content) { if (is_feed()) { return $output; } $defaults = array( 'id' => '', 'align' => 'alignnone', 'width' => '', 'caption' => '' ); $attr = shortcode_atts($defaults, $attr); // If the width is less than 1 or there is no caption, return the content wrapped between the [caption] tags if ($attr['width'] < 1 || empty($attr['caption'])) { return $content; } // Set up the attributes for the caption <figure> $attributes = (!empty($attr['id']) ? ' id="' . esc_attr($attr['id']) . '"' : '' ); $attributes .= ' class="thumbnail wp-caption ' . esc_attr($attr['align']) . '"'; $attributes .= ' style="width: ' . esc_attr($attr['width']) . 'px"'; $output = '<figure' . $attributes .'>'; $output .= do_shortcode($content); $output .= '<figcaption class="caption wp-caption-text">' . $attr['caption'] . '</figcaption>'; $output .= '</figure>'; return $output; } add_filter('img_caption_shortcode', 'roots_caption', 10, 3); /** * Remove unnecessary dashboard widgets * * @link http://www.deluxeblogtips.com/2011/01/remove-dashboard-widgets-in-wordpress.html */ function roots_remove_dashboard_widgets() { remove_meta_box('dashboard_incoming_links', 'dashboard', 'normal'); remove_meta_box('dashboard_plugins', 'dashboard', 'normal'); remove_meta_box('dashboard_primary', 'dashboard', 'normal'); remove_meta_box('dashboard_secondary', 'dashboard', 'normal'); } add_action('admin_init', 'roots_remove_dashboard_widgets'); /** * Clean up the_excerpt() */ function roots_excerpt_length($length) { return POST_EXCERPT_LENGTH; } function roots_excerpt_more($more) { return ' &hellip; <a href="' . get_permalink() . '">' . __('Continued', 'roots') . '</a>'; } add_filter('excerpt_length', 'roots_excerpt_length'); add_filter('excerpt_more', 'roots_excerpt_more'); /** * Remove unnecessary self-closing tags */ function roots_remove_self_closing_tags($input) { return str_replace(' />', '>', $input); } add_filter('get_avatar', 'roots_remove_self_closing_tags'); // <img /> add_filter('comment_id_fields', 'roots_remove_self_closing_tags'); // <input /> add_filter('post_thumbnail_html', 'roots_remove_self_closing_tags'); // <img /> /** * Don't return the default description in the RSS feed if it hasn't been changed */ function roots_remove_default_description($bloginfo) { $default_tagline = 'Just another WordPress site'; return ($bloginfo === $default_tagline) ? '' : $bloginfo; } add_filter('get_bloginfo_rss', 'roots_remove_default_description'); /** * Redirects search results from /?s=query to /search/query/, converts %20 to + * * @link http://txfx.net/wordpress-plugins/nice-search/ */ function roots_nice_search_redirect() { global $wp_rewrite; if (!isset($wp_rewrite) || !is_object($wp_rewrite) || !$wp_rewrite->using_permalinks()) { return; } $search_base = $wp_rewrite->search_base; if (is_search() && !is_admin() && strpos($_SERVER['REQUEST_URI'], "/{$search_base}/") === false) { wp_redirect(home_url("/{$search_base}/" . 
urlencode(get_query_var('s')))); exit(); } } if (current_theme_supports('nice-search')) { add_action('template_redirect', 'roots_nice_search_redirect'); } /** * Fix for empty search queries redirecting to home page * * @link http://wordpress.org/support/topic/blank-search-sends-you-to-the-homepage#post-1772565 * @link http://core.trac.wordpress.org/ticket/11330 */ function roots_request_filter($query_vars) { if (isset($_GET['s']) && empty($_GET['s'])) { $query_vars['s'] = ' '; } return $query_vars; } add_filter('request', 'roots_request_filter'); /** * Tell WordPress to use searchform.php from the templates/ directory */ function roots_get_search_form($argument) { if ($argument === '') { locate_template('/templates/searchform.php', true, false); } } add_filter('get_search_form', 'roots_get_search_form');
1
7,968
I just updated a number of sites using older versions of Roots to WP 3.6, and this little function change was required. Multiple search bars were displaying when I used the search widget in a widgetized sidebar. Updating roots_get_search_form as seen in this change resolved it for me!
roots-sage
php
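
The roots-sage fix above leans on the get_search_form filter contract in WP 3.6: the callback receives the default form markup and whatever it returns is what gets rendered, so the patched version empties $form, echoes the theme's own template via locate_template(), and returns the emptied string so only the template's form appears. A minimal runnable Go model of that replace-style hook — this sketches the contract as described, not actual WordPress internals:

package main

import "fmt"

// formFilter mimics the get_search_form contract: it receives the default
// markup and returns the markup that should actually be rendered.
type formFilter func(defaultForm string) string

func getSearchForm(filters []formFilter) string {
	form := "<default search form>"
	for _, f := range filters {
		form = f(form)
	}
	return form
}

func main() {
	rootsFilter := func(string) string {
		// locate_template(..., true, false) echoes the theme template.
		fmt.Println("<templates/searchform.php markup>")
		return "" // returning the emptied form suppresses the default markup
	}
	fmt.Print(getSearchForm([]formFilter{rootsFilter}))
}
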
@@ -19,6 +19,7 @@ use Thelia\Condition\ConditionEvaluator; use Thelia\Condition\Implementation\MatchForTotalAmount; use Thelia\Condition\Operators; use Thelia\Coupon\FacadeInterface; +use Thelia\Coupon\Type\FreeProduct; use Thelia\Model\CartItem; use Thelia\Model\CountryQuery; use Thelia\Model\CurrencyQuery;
1
<?php /*************************************************************************************/ /* This file is part of the Thelia package. */ /* */ /* Copyright (c) OpenStudio */ /* email : [email protected] */ /* web : http://www.thelia.net */ /* */ /* For the full copyright and license information, please view the LICENSE.txt */ /* file that was distributed with this source code. */ /*************************************************************************************/ namespace Thelia\Coupon\Type; use Propel\Runtime\ActiveQuery\Criteria; use Propel\Runtime\Collection\ObjectCollection; use Thelia\Condition\ConditionCollection; use Thelia\Condition\ConditionEvaluator; use Thelia\Condition\Implementation\MatchForTotalAmount; use Thelia\Condition\Operators; use Thelia\Coupon\FacadeInterface; use Thelia\Model\CartItem; use Thelia\Model\CountryQuery; use Thelia\Model\CurrencyQuery; use Thelia\Model\Product; use Thelia\Model\ProductQuery; /** * @package Coupon * @author Franck Allimant <[email protected]> */ class FreeProductTest extends \PHPUnit_Framework_TestCase { /** @var Product $freeProduct */ public $freeProduct; public $originalPrice; public $originalPromo; /** * Sets up the fixture, for example, opens a network connection. * This method is called before a test is executed. */ protected function setUp() { $currency = CurrencyQuery::create()->filterByCode('EUR')->findOne(); // Find a product $this->freeProduct = ProductQuery::create() ->joinProductSaleElements("pse_join") ->addJoinCondition("pse_join", "is_default = ?", 1, null, \PDO::PARAM_INT) ->findOne() ; if (null === $this->freeProduct) { $this->markTestSkipped("You can't run this test as there's no product with associated product_sale_elements"); } $this->originalPrice = $this->freeProduct->getDefaultSaleElements()->getPricesByCurrency($currency)->getPrice(); $this->originalPromo = $this->freeProduct->getDefaultSaleElements()->getPromo(); $this->freeProduct->getDefaultSaleElements()->setPromo(false)->save(); } /** * Generate adapter stub * * @param int $cartTotalPrice Cart total price * @param string $checkoutCurrency Checkout currency * @param string $i18nOutput Output from each translation * * @return \PHPUnit_Framework_MockObject_MockObject */ public function generateFacadeStub($cartTotalPrice = 400, $checkoutCurrency = 'EUR', $i18nOutput = '') { $stubFacade = $this->getMockBuilder('\Thelia\Coupon\BaseFacade') ->disableOriginalConstructor() ->getMock(); $currencies = CurrencyQuery::create(); $currencies = $currencies->find(); $stubFacade->expects($this->any()) ->method('getAvailableCurrencies') ->will($this->returnValue($currencies)); $stubFacade->expects($this->any()) ->method('getCartTotalPrice') ->will($this->returnValue($cartTotalPrice)); $stubFacade->expects($this->any()) ->method('getCheckoutCurrency') ->will($this->returnValue($checkoutCurrency)); $stubFacade->expects($this->any()) ->method('getConditionEvaluator') ->will($this->returnValue(new ConditionEvaluator())); $stubTranslator = $this->getMockBuilder('\Thelia\Core\Translation\Translator') ->disableOriginalConstructor() ->getMock(); $stubTranslator->expects($this->any()) ->method('trans') ->will($this->returnValue($i18nOutput)); $stubFacade->expects($this->any()) ->method('getTranslator') ->will($this->returnValue($stubTranslator)); $stubDispatcher = $this->getMockBuilder('\Symfony\Component\EventDispatcher\EventDispatcher') ->disableOriginalConstructor() ->getMock(); $stubDispatcher->expects($this->any()) ->method('dispatch') ->will($this->returnCallback(function 
($dummy, $cartEvent) { $ci = new CartItem(); $ci ->setId(3) ->setPrice(123) ->setPromo(0) ->setProductId($this->freeProduct->getId()) ; $cartEvent->setCartItem($ci); })); $stubFacade->expects($this->any()) ->method('getDispatcher') ->will($this->returnValue($stubDispatcher)); $stubSession = $this->getMockBuilder('\Thelia\Core\HttpFoundation\Session\Session') ->disableOriginalConstructor() ->getMock(); $stubSession->expects($this->any()) ->method('get') ->will($this->onConsecutiveCalls(-1, 3)); $stubRequest = $this->getMockBuilder('\Thelia\Core\HttpFoundation\Request') ->disableOriginalConstructor() ->getMock(); $stubRequest->expects($this->any()) ->method('getSession') ->will($this->returnValue($stubSession)); $stubFacade->expects($this->any()) ->method('getRequest') ->will($this->returnValue($stubRequest)); $country = CountryQuery::create() ->findOneByByDefault(1); $stubFacade->expects($this->any()) ->method('getDeliveryCountry') ->will($this->returnValue($country)); return $stubFacade; } public function generateMatchingCart(\PHPUnit_Framework_MockObject_MockObject $stubFacade, $count) { $product1 = ProductQuery::create()->addAscendingOrderByColumn('RAND()')->findOne(); $product2 = ProductQuery::create()->filterById($product1->getId(), Criteria::NOT_IN)->addAscendingOrderByColumn('RAND()')->findOne(); $cartItem1Stub = $this->getMockBuilder('\Thelia\Model\CartItem') ->disableOriginalConstructor() ->getMock(); $cartItem1Stub ->expects($this->any()) ->method('getProduct') ->will($this->returnValue($product1)) ; $cartItem1Stub ->expects($this->any()) ->method('getQuantity') ->will($this->returnValue(1)) ; $cartItem1Stub ->expects($this->any()) ->method('getPrice') ->will($this->returnValue(100)) ; $cartItem2Stub = $this->getMockBuilder('\Thelia\Model\CartItem') ->disableOriginalConstructor() ->getMock(); $cartItem2Stub ->expects($this->any()) ->method('getProduct') ->will($this->returnValue($product2)); $cartItem2Stub ->expects($this->any()) ->method('getQuantity') ->will($this->returnValue(2)) ; $cartItem2Stub ->expects($this->any()) ->method('getPrice') ->will($this->returnValue(150)) ; $cartStub = $this->getMockBuilder('\Thelia\Model\Cart') ->disableOriginalConstructor() ->getMock(); if ($count == 1) { $ret = [$cartItem1Stub]; } else { $ret = [$cartItem1Stub, $cartItem2Stub]; } $cartStub ->expects($this->any()) ->method('getCartItems') ->will($this->returnValue($ret)); $stubFacade->expects($this->any()) ->method('getCart') ->will($this->returnValue($cartStub)); return [$product1->getId(), $product2->getId()]; } public function generateNoMatchingCart(\PHPUnit_Framework_MockObject_MockObject $stubFacade) { $product2 = new Product(); $product2->setId(30); $cartItem2Stub = $this->getMockBuilder('\Thelia\Model\CartItem') ->disableOriginalConstructor() ->getMock(); $cartItem2Stub->expects($this->any()) ->method('getProduct') ->will($this->returnValue($product2)) ; $cartItem2Stub->expects($this->any()) ->method('getQuantity') ->will($this->returnValue(2)) ; $cartItem2Stub ->expects($this->any()) ->method('getPrice') ->will($this->returnValue(11000)) ; $cartStub = $this->getMockBuilder('\Thelia\Model\Cart') ->disableOriginalConstructor() ->getMock(); $cartStub ->expects($this->any()) ->method('getCartItems') ->will($this->returnValue([$cartItem2Stub])); $stubFacade->expects($this->any()) ->method('getCart') ->will($this->returnValue($cartStub)); } public function testSet() { $stubFacade = $this->generateFacadeStub(); $coupon = new FreeProduct($stubFacade); $date = new \DateTime(); $coupon->set( 
$stubFacade, 'TEST', 'TEST Coupon', 'This is a test coupon title', 'This is a test coupon description', array('percentage' => 10.00, 'products' => [10, 20], 'offered_product_id' => $this->freeProduct->getId(), 'offered_category_id' => 1), true, true, true, true, 254, $date->setTimestamp(strtotime("today + 3 months")), new ObjectCollection(), new ObjectCollection(), false ); $condition1 = new MatchForTotalAmount($stubFacade); $operators = array( MatchForTotalAmount::CART_TOTAL => Operators::SUPERIOR, MatchForTotalAmount::CART_CURRENCY => Operators::EQUAL ); $values = array( MatchForTotalAmount::CART_TOTAL => 40.00, MatchForTotalAmount::CART_CURRENCY => 'EUR' ); $condition1->setValidatorsFromForm($operators, $values); $condition2 = new MatchForTotalAmount($stubFacade); $operators = array( MatchForTotalAmount::CART_TOTAL => Operators::INFERIOR, MatchForTotalAmount::CART_CURRENCY => Operators::EQUAL ); $values = array( MatchForTotalAmount::CART_TOTAL => 400.00, MatchForTotalAmount::CART_CURRENCY => 'EUR' ); $condition2->setValidatorsFromForm($operators, $values); $conditions = new ConditionCollection(); $conditions[] = $condition1; $conditions[] = $condition2; $coupon->setConditions($conditions); $this->assertEquals('TEST', $coupon->getCode()); $this->assertEquals('TEST Coupon', $coupon->getTitle()); $this->assertEquals('This is a test coupon title', $coupon->getShortDescription()); $this->assertEquals('This is a test coupon description', $coupon->getDescription()); $this->assertEquals(true, $coupon->isCumulative()); $this->assertEquals(true, $coupon->isRemovingPostage()); $this->assertEquals(true, $coupon->isAvailableOnSpecialOffers()); $this->assertEquals(true, $coupon->isEnabled()); $this->assertEquals(254, $coupon->getMaxUsage()); $this->assertEquals($date, $coupon->getExpirationDate()); } public function testMatchOne() { $stubFacade = $this->generateFacadeStub(); $coupon = new FreeProduct($stubFacade); $date = new \DateTime(); $coupon->set( $stubFacade, 'TEST', 'TEST Coupon', 'This is a test coupon title', 'This is a test coupon description', array('percentage' => 10.00, 'products' => [10, 20], 'offered_product_id' => $this->freeProduct->getId(), 'offered_category_id' => 1), true, true, true, true, 254, $date->setTimestamp(strtotime("today + 3 months")), new ObjectCollection(), new ObjectCollection(), false ); $products = $this->generateMatchingCart($stubFacade, 1); $coupon->product_list = $products; $this->assertEquals(123.00, $coupon->exec()); } public function testMatchSeveral() { $stubFacade = $this->generateFacadeStub(); $coupon = new FreeProduct($stubFacade); $date = new \DateTime(); $coupon->set( $stubFacade, 'TEST', 'TEST Coupon', 'This is a test coupon title', 'This is a test coupon description', array('percentage' => 10.00, 'products' => [10, 20], 'offered_product_id' => $this->freeProduct->getId(), 'offered_category_id' => 1), true, true, true, true, 254, $date->setTimestamp(strtotime("today + 3 months")), new ObjectCollection(), new ObjectCollection(), false ); $products = $this->generateMatchingCart($stubFacade, 2); $coupon->product_list = $products; $this->assertEquals(123.00, $coupon->exec()); } public function testNoMatch() { $stubFacade = $this->generateFacadeStub(); $coupon = new FreeProduct($stubFacade); $date = new \DateTime(); $coupon->set( $stubFacade, 'TEST', 'TEST Coupon', 'This is a test coupon title', 'This is a test coupon description', array('percentage' => 10.00, 'products' => [10, 20], 'offered_product_id' => $this->freeProduct->getId(), 'offered_category_id' 
=> 1), true, true, true, true, 254, $date->setTimestamp(strtotime("today + 3 months")), new ObjectCollection(), new ObjectCollection(), false ); $this->generateNoMatchingCart($stubFacade); $this->assertEquals(0.00, $coupon->exec()); } public function testGetName() { $stubFacade = $this->generateFacadeStub(399, 'EUR', 'Coupon test name'); /** @var FacadeInterface $stubFacade */ $coupon = new FreeProduct($stubFacade); $actual = $coupon->getName(); $expected = 'Coupon test name'; $this->assertEquals($expected, $actual); } public function testGetToolTip() { $tooltip = 'Coupon test tooltip'; $stubFacade = $this->generateFacadeStub(399, 'EUR', $tooltip); /** @var FacadeInterface $stubFacade */ $coupon = new FreeProduct($stubFacade); $actual = $coupon->getToolTip(); $expected = $tooltip; $this->assertEquals($expected, $actual); } /** * Tears down the fixture, for example, closes a network connection. * This method is called after a test is executed. */ protected function tearDown() { if (null !== $this->freeProduct) { $this->freeProduct->getDefaultSaleElements()->setPromo($this->originalPromo)->save(); } } }
1
12,258
Fixes a test that failed in some cases
thelia-thelia
php
@@ -23,9 +23,7 @@ module.exports = [ 'https://balance-staging.mercury.basicattentiontoken.org/', 'https://publishers.basicattentiontoken.org/', 'https://publishers-staging.basicattentiontoken.org/', - 'https://updates.bravesoftware.com/', // remove this once updates are moved to the prod environment - 'https://ads-serve.bravesoftware.com/', // remove this once ads catalog moves to using prod - 'https://pdfjs.robwu.nl/logpdfjs', // allowed because it gets canceled in tracking protection + 'https://updates.bravesoftware.com/', 'https://publishers-distro.basicattentiontoken.org/', 'https://publishers-staging-distro.basicattentiontoken.org/', 'https://p3a.brave.com/',
1
// Before adding to this list, get approval from the security team module.exports = [ 'http://update.googleapis.com/service/update2', // allowed because it 307's to go-updater.brave.com. should never actually connect to googleapis.com. 'https://update.googleapis.com/service/update2', // allowed because it 307's to go-updater.brave.com. should never actually connect to googleapis.com. 'https://safebrowsing.googleapis.com/v4/threatListUpdates', // allowed because it 307's to safebrowsing.brave.com 'https://clients2.googleusercontent.com/crx/blobs/', 'http://dl.google.com/release2/chrome_component/', // allowed because it 307's to crlset1.brave.com 'https://dl.google.com/release2/chrome_component/', // allowed because it 307's to crlset1.brave.com 'https://no-thanks.invalid/', // fake gaia URL 'https://go-updater.brave.com/', 'https://safebrowsing.brave.com/', 'https://brave-core-ext.s3.brave.com/', 'https://laptop-updates.brave.com/', 'https://static.brave.com/', 'https://static1.brave.com/', 'http://componentupdater.brave.com/service/update2', // allowed because it 307's to https://componentupdater.brave.com 'https://componentupdater.brave.com/service/update2', 'https://crlsets.brave.com/', 'https://crxdownload.brave.com/crx/blobs/', 'https://ledger.mercury.basicattentiontoken.org/', 'https://ledger-staging.mercury.basicattentiontoken.org/', 'https://balance.mercury.basicattentiontoken.org/', 'https://balance-staging.mercury.basicattentiontoken.org/', 'https://publishers.basicattentiontoken.org/', 'https://publishers-staging.basicattentiontoken.org/', 'https://updates.bravesoftware.com/', // remove this once updates are moved to the prod environment 'https://ads-serve.bravesoftware.com/', // remove this once ads catalog moves to using prod 'https://pdfjs.robwu.nl/logpdfjs', // allowed because it gets canceled in tracking protection 'https://publishers-distro.basicattentiontoken.org/', 'https://publishers-staging-distro.basicattentiontoken.org/', 'https://p3a.brave.com/', 'https://dns.google/dns-query', // needed for DoH on Mac build machines 'https://chrome.cloudflare-dns.com/dns-query', // needed for DoH on Mac build machines ]
1
6,366
What's the prod URL for this? Just curious. @amirsaber
brave-brave-browser
js
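
The brave-browser file above reads like an allowlist of URL prefixes (per its header comment, additions need security-team approval). A runnable Go sketch of how such a list is typically checked — the prefix-matching semantics are an assumption for illustration, not necessarily how brave-core consumes it:

package main

import (
	"fmt"
	"strings"
)

// allowed mirrors a few entries from the list above.
var allowed = []string{
	"https://updates.bravesoftware.com/",
	"https://componentupdater.brave.com/service/update2",
}

// isAllowed reports whether url begins with any allowlisted prefix.
func isAllowed(url string) bool {
	for _, prefix := range allowed {
		if strings.HasPrefix(url, prefix) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isAllowed("https://updates.bravesoftware.com/extensions")) // true
	fmt.Println(isAllowed("https://ads-serve.bravesoftware.com/"))         // false once removed by the patch
}
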
@@ -153,6 +153,12 @@ public class Constants { // Overridable plugin load properties public static final String AZ_PLUGIN_LOAD_OVERRIDE_PROPS = "azkaban.plugin.load.override.props"; + // Append JVM args to job commands + public static final String AZ_JOB_COMMAND_ARGS = "azkaban.jvm.cmd.args"; + + // Ignore the above JVM args for jobtypes which have this true. + public static final String AZ_JOB_IGNORE_JVM_ARGS = "ignore.jvm.args"; + /** * File containing param override configs * For a directory structure, property files in Proj_Dir used to have lower precedence than A.
1
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban; import java.time.Duration; /** * Constants used in configuration files or shared among classes. * * <p>Conventions: * * <p>Internal constants to be put in the {@link Constants} class * * <p>Configuration keys to be put in the {@link ConfigurationKeys} class * * <p>Flow level properties keys to be put in the {@link FlowProperties} class * * <p>Job level Properties keys to be put in the {@link JobProperties} class * * <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g. * azkaban.job.some_key</p> */ public class Constants { // Azkaban Flow Versions public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0; public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0; // Flow 2.0 file suffix public static final String PROJECT_FILE_SUFFIX = ".project"; public static final String FLOW_FILE_SUFFIX = ".flow"; // Flow 2.0 node type public static final String NODE_TYPE = "type"; public static final String FLOW_NODE_TYPE = "flow"; // Flow 2.0 flow and job path delimiter public static final String PATH_DELIMITER = ":"; // Job properties override suffix public static final String JOB_OVERRIDE_SUFFIX = ".jor"; // Key for the root node of the DAG in runtime properties public static final String ROOT_NODE_IDENTIFIER = "ROOT"; // Names and paths of various file names to configure Azkaban public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port"; public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app"; public static final String AZKABAN_CONTAINER_CONTEXT_KEY = "flow_container"; // Internal username used to perform SLA action public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla"; // Memory check retry interval when OOM in ms public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1; // Max number of memory check retry public static final int MEMORY_CHECK_RETRY_LIMIT = 720; public static final int DEFAULT_PORT_NUMBER = 8081; public static final int DEFAULT_SSL_PORT_NUMBER = 8443; public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20; // Configures the form limits for the web application public static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024; // One Schedule's default End Time: 01/01/2050, 00:00:00, UTC public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L; // Default flow trigger max wait time public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10); public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1); public static final int DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = 20; // The flow exec id for a flow trigger instance which hasn't started a flow yet public static final int UNASSIGNED_EXEC_ID = -1; // The flow 
exec id for a flow trigger instance unable to trigger a flow yet public static final int FAILED_EXEC_ID = -2; // Default locked flow error message public static final String DEFAULT_LOCKED_FLOW_ERROR_MESSAGE = "Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient" + " flow. Please refer to the Dr. Elephant report for this flow for more information."; // Default maximum number of concurrent runs for a single flow public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30; // How often executors will poll new executions in Poll Dispatch model public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000; // Executors can use cpu load calculated from this period to take/skip polling turns public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60; // Default value to feature enable setting. To be backward compatible, this value === FALSE public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false; // Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to push result into DB every N finished ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20; // Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to pull result from DB every N new ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50; // Use Polling Service to sync the ramp status cross EXEC Server. public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false; // How often executors will poll ramp status in Poll Dispatch model public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10; // Username to be sent to UserManager when OAuth is in use, and real username is not available: public static final String OAUTH_USERNAME_PLACEHOLDER = "<OAuth>"; // Used by UserManager for password validation (to tell apart real passwords from auth codes). 
// Empirically, passwords are shorter than this, and ACs are longer: public static final int OAUTH_MIN_AUTHCODE_LENGTH = 80; // Used (or should be used) wherever a string representation of UTF_8 charset is needed: public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString(); // Specifies the source (adhoc, scheduled, event) from which a flow execution is triggered public static final String EXECUTION_SOURCE_ADHOC = "adhoc"; public static final String EXECUTION_SOURCE_SCHEDULED = "schedule"; public static final String EXECUTION_SOURCE_EVENT = "event"; public static final String CONTENT_TYPE_TEXT_PLAIN = "text/plain"; public static final String CHARACTER_ENCODING_UTF_8 = "utf-8"; // Use in-memory keystore public static final String USE_IN_MEMORY_KEYSTORE = "use.in-memory.keystore"; // AZ_HOME in containerized execution public static final String AZ_HOME = "AZ_HOME"; // Flow restart action on EXECUTION_STOPPED public static final String RESTART_FLOW = "Restart Flow"; // Overridable plugin load properties public static final String AZ_PLUGIN_LOAD_OVERRIDE_PROPS = "azkaban.plugin.load.override.props"; /** * File containing param override configs. * In a directory structure like the one below, property files in Proj_Dir used to have lower precedence than those in A. * For the newly introduced file, this is no longer true: * Proj_Dir * basic.properties * param_override.properties * A/ * foo_a.properties * foo.job * i.e. * (a). param_override.properties precedence will be higher than that of foo_a.properties. * (b). foo_a.properties precedence will be higher than that of basic.properties. */ public static final String PARAM_OVERRIDE_FILE = "param_override.properties"; // Azkaban event reporter constants public static class EventReporterConstants { public static final String FLOW_NAME = "flowName"; public static final String AZ_HOST = "azkabanHost"; public static final String AZ_WEBSERVER = "azkabanWebserver"; public static final String PROJECT_NAME = "projectName"; public static final String SUBMIT_USER = "submitUser"; public static final String START_TIME = "startTime"; public static final String END_TIME = "endTime"; public static final String FLOW_STATUS = "flowStatus"; public static final String EXECUTION_ID = "executionId"; public static final String SUBMIT_TIME = "submitTime"; public static final String FLOW_VERSION = "flowVersion"; public static final String FAILED_JOB_ID = "failedJobId"; public static final String MODIFIED_BY = "modifiedBy"; public static final String FLOW_KILL_DURATION = "flowKillDuration"; public static final String FLOW_PAUSE_DURATION = "flowPauseDuration"; public static final String FLOW_PREPARATION_DURATION = "flowPreparationDuration"; public static final String SLA_OPTIONS = "slaOptions"; public static final String VERSION_SET = "versionSet"; public static final String EXECUTOR_TYPE = "executorType"; public static final String PROJECT_FILE_UPLOAD_USER = "projectFileUploadUser"; public static final String PROJECT_FILE_UPLOADER_IP_ADDR = "projectFileUploaderIpAddr"; public static final String PROJECT_FILE_NAME = "projectFileName"; public static final String PROJECT_FILE_UPLOAD_TIME = "projectFileUploadTime"; public static final String JOB_ID = "jobId"; public static final String JOB_TYPE = "jobType"; public static final String VERSION = "version"; public static final String JOB_PROXY_USER = "jobProxyUser"; public static final String ATTEMPT_ID = "attemptId"; public static final String JOB_KILL_DURATION = "jobKillDuration"; public static final String QUEUE_DURATION = "queueDuration";
public static final String FAILURE_MESSAGE = "failureMessage"; public static final String JOB_STATUS = "jobStatus"; public static final String EFFECTIVE_USERS = "effectiveUsers"; public static final String CPU_UTILIZED = "cpuUtilized"; public static final String MEMORY_UTILIZED_IN_BYTES = "memoryUtilizedInBytes"; } public static class ConfigurationKeys { public static final String AZKABAN_CLUSTER_NAME = "azkaban.cluster.name"; public static final String AZKABAN_CLUSTER_ENV = "azkaban.cluster.env"; public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties"; // Property to enable the appropriate dispatch model public static final String AZKABAN_EXECUTION_DISPATCH_METHOD = "azkaban.execution.dispatch.method"; // Configures Azkaban to use the new polling model for dispatching public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms"; public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled"; public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE = "azkaban.polling_criteria.flow_threads_available"; public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB = "azkaban.polling_criteria.min_free_memory_gb"; public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT = "azkaban.polling_criteria.max_cpu_utilization_pct"; public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = "azkaban.polling_criteria.cpu_load_period_sec"; // Configures properties for the Azkaban executor health check public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min"; public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount"; public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email"; // Configures the Azkaban Flow Version in the project YAML file public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version"; // These properties are configurable through azkaban.properties public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename"; // External URL template of a given topic, specified in the list defined above // Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url"; // Designates one of the external link topics to correspond to an execution analyzer // Deprecated, replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic"; // Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label"; // Defines a list of external links, each referred to as a topic // External links defined here will be translated into buttons and rendered in the Flow Execution page public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS = "azkaban.server.external.analyzer.topics"; // Defines the timeout in milliseconds for azkaban to validate external links // If this config is missing, azkaban will use a default timeout of 3000 milliseconds. // If validation fails, the buttons are disabled in the Flow Execution page.
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TIMEOUT_MS = "azkaban.server.external.analyzer.timeout.ms"; // Designates one of the external link topics to correspond to an execution analyzer public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL = "azkaban.server" + ".external.analyzer.${topic}.label"; // External URL template of a given topic, specified in the list defined above public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL = "azkaban.server" + ".external.analyzer.${topic}.url"; // Designates one of the external link topics to correspond to a job log viewer public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label"; /* * Hadoop/Spark user job link. * Example: * a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id} * b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id} * c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs * */ public static final String HADOOP_CLUSTER_URL = "azkaban.server.external.hadoop_cluster_url"; public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url"; public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url"; public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url"; // Configures the Kafka appender for logging user jobs, specified for the exec server public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList"; public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic"; // Represents the class name of the azkaban metrics reporter. public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name"; // Represents the metrics server URL. public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url"; public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled"; public static final String MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = "azkaban.metrics" + ".min_age_for_classifying_a_flow_aged_minutes"; // User-facing web server configurations used to construct the user-facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users. // enduser -> myazkabanhost:443 -> proxy -> localhost:8081 // When these parameters are set, they are used to generate email links. // If they are not set, jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used. public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname"; public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port"; public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port"; // Hostname for the host; if not specified, the canonical hostname will be used public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname"; // List of users we prevent azkaban from running flows as.
(ie: root, azkaban) public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users"; // Path name of execute-as-user executable public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib"; // Name of *nix group associated with the process running Azkaban public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name"; // Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs. // Jetty server configurations. public static final String JETTY_HEADER_BUFFER_SIZE = "jetty.headerBufferSize"; public static final String JETTY_USE_SSL = "jetty.use.ssl"; public static final String JETTY_SSL_PORT = "jetty.ssl.port"; public static final String JETTY_PORT = "jetty.port"; public static final String EXECUTOR_PORT_FILE = "executor.portfile"; // To set a fixed port for executor-server. Otherwise some available port is used. public static final String EXECUTOR_PORT = "executor.port"; public static final String EXECUTOR_SSL_PORT = "executor.ssl.port"; public static final String DEFAULT_TIMEZONE_ID = "default.timezone.id"; // Boolean config set on the Web server to prevent users from creating projects. When set to // true only admins or users with CREATEPROJECTS permission can create projects. public static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects"; // Boolean config set on the Web server to prevent users from uploading projects. When set to // true only admins or users with UPLOADPROJECTS permission can upload projects. public static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects"; // Max flow running time in mins, server will kill flows running longer than this setting. // if not set or <= 0, then there's no restriction on running time. 
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes"; // Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures) public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries"; public static final String AZKABAN_DEPENDENCY_DOWNLOAD_THREADPOOL_SIZE = "azkaban.dependency.download.threadpool.size"; public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type"; public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir"; public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path"; // This really should be azkaban.storage.hdfs.project_root.uri public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri"; public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri"; public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal"; public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path"; public static final String PROJECT_TEMP_DIR = "project.temp.dir"; // Event reporting properties public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM = "azkaban.event.reporting.class"; public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled"; // Comma separated list of properties to propagate from flow to Event reporter metadata public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS = "azkaban.event.reporting.kafka.brokers"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC = "azkaban.event.reporting.kafka.topic"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL = "azkaban.event.reporting.kafka.schema.registry.url"; /* * The max number of artifacts retained per project. * Accepted Values: * - 0 : Save all artifacts. No clean up is done on storage. * - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage * * Note: Having an unacceptable value results in an exception and the service would REFUSE * to start. * * Example: * a) azkaban.storage.artifact.max.retention=all * implies save all artifacts * b) azkaban.storage.artifact.max.retention=3 * implies save latest 3 versions saved in storage. **/ public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention"; // enable quartz scheduler and flow trigger if true. 
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz"; public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential"; public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential"; public static final String SECURITY_USER_GROUP = "azkaban.security.user.group"; public static final String CSR_KEYSTORE_LOCATION = "azkaban.csr.keystore.location"; // dir to keep dependency plugins public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir"; public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors"; public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow"; // list of whitelisted flows, with a specific max number of concurrent runs. Format: // <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number> public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST = "azkaban.concurrent.runs.oneflow.whitelist"; public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS = "azkaban.activeexecutor.refresh.milisecinterval"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW = "azkaban.activeexecutor.refresh.flowinterval"; public static final String EXECUTORINFO_REFRESH_MAX_THREADS = "azkaban.executorinfo.refresh.maxThreads"; public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors"; public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters"; public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX = "azkaban.executorselector.comparator."; public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled"; public static final String QUEUE_PROCESSOR_WAIT_IN_MS = "azkaban.queue.processor.wait.in.ms"; public static final String SESSION_TIME_TO_LIVE = "session.time.to.live"; // allowed max number of sessions per user per IP public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session" + ".max_number_per_ip_per_user"; // allowed max size of the shared project dir (percentage of partition size), e.g. 0.8 public static final String PROJECT_CACHE_SIZE_PERCENTAGE = "azkaban.project_cache_size_percentage_of_disk"; public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE = "azkaban.project_cache_throttle_percentage"; // how many older versions of project files are kept in the DB before deleting them public static final String PROJECT_VERSION_RETENTION = "project.version.retention"; // number of rows to be displayed on the executions page. public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size"; // locked flow error message. Parameters passed in are the flow name and project name. public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE = "azkaban.locked.flow.error.message"; // flow ramp related setting keys // Default value of the feature-enable setting. To be backward compatible, this value === FALSE public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled"; // Because multiple AzkabanExec Server instances may run, the ramp result must be persisted into the DB. // However, frequent data persistence sacrifices performance for limited gains in data accuracy.
// This setting controls pushing the result into the DB every N finished ramped workflows public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max"; // Because multiple AzkabanExec Server instances may run, the ramp result must be persisted into the DB. // However, frequent data persistence sacrifices performance for limited gains in data accuracy. // This setting controls pulling the result from the DB every N new ramped workflows public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max"; // A Polling Service can be applied to determine the ramp status synchronization interval. public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled"; public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval"; public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max"; public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min"; public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms"; public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS = "execution.logs.cleanup.interval.seconds"; public static final String EXECUTION_LOGS_CLEANUP_RECORD_LIMIT = "execution.logs.cleanup.record.limit"; // OAuth 2.0 configuration keys. If missing, no OAuth will be attempted, and the old // username/password{+2FA} prompt will be given for interactive login: public static final String OAUTH_PROVIDER_URI_KEY = "oauth.provider_uri"; // where to send the user for the OAuth flow, e.g.: // oauth.provider_uri=https://login.microsoftonline.com/tenant-id/oauth2/v2.0/authorize\ // ?client_id=client_id\ // &response_type=code\ // &scope=openid\ // &response_mode=form_post\ // &state={state}\ // &redirect_uri={redirect_uri} // Strings {state} and {redirect_uri}, if present verbatim in the property value, will be // substituted at runtime with the (URL-encoded) navigation target and OAuth response handler URIs, // respectively. See handleOauth() in LoginAbstractServlet.java for details. public static final String OAUTH_REDIRECT_URI_KEY = "oauth.redirect_uri"; // how OAuth calls us back, e.g.: // oauth.redirect_uri=http://localhost:8081/?action=oauth_callback // By default job props always win over flow override props. // If this flag is set to true, then override props also override existing job props.
public static final String AZKABAN_EXECUTOR_RUNTIME_PROPS_OVERRIDE_EAGER = "azkaban.executor.runtimeProps.override.eager"; // Executor client TLS properties public static final String EXECUTOR_CLIENT_TLS_ENABLED = "azkaban.executor.client.tls.enabled"; public static final String EXECUTOR_CLIENT_TRUSTSTORE_PATH = "azkaban.executor.client.truststore"; public static final String EXECUTOR_CLIENT_TRUSTSTORE_PASSWORD = "azkaban.executor.client.trustpassword"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED = "azkaban.executor.reverse.proxy.enabled"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME = "azkaban.executor.reverse.proxy.hostname"; public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_PORT = "azkaban.executor.reverse.proxy.port"; // Job callback public static final String AZKABAN_EXECUTOR_JOBCALLBACK_ENABLED = "azkaban.executor.jobcallback.enabled"; } public static class FlowProperties { // Basic properties of flows as set by the executor server public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname"; public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid"; public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser"; public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid"; public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion"; } public static class JobProperties { // Job property that enables/disables using Kafka logging of user job logs public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable"; /* * This parameter replaces EXTRA_HCAT_LOCATION, which could fail when one of the uris is not available. * EXTRA_HCAT_CLUSTERS has the following format: * other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port" * Each semicolon-separated string is regarded as a "cluster", and we will get a delegation token from each cluster. * The uris (hcat servers) in a "cluster" ensure HA is provided. **/ public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters"; /* * The settings to be defined by the user, indicating whether there are hcat locations other than the * default one that the system should pre-fetch hcat tokens from. Note: multiple thrift uris are * supported; use commas to separate the values. Values are case insensitive. **/ // Use EXTRA_HCAT_CLUSTERS instead @Deprecated public static final String EXTRA_HCAT_LOCATION = "other_hcat_location"; // If true, AZ will fetch the jobs' certificate from the remote Certificate Authority. public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl"; // If true, AZ will fetch the OAuth token from the credential provider public static final String ENABLE_OAUTH = "azkaban.enable.oauth"; // Job properties that indicate maximum memory size public static final String JOB_MAX_XMS = "job.max.Xms"; public static final String MAX_XMS_DEFAULT = "1G"; public static final String JOB_MAX_XMX = "job.max.Xmx"; public static final String MAX_XMX_DEFAULT = "2G"; // The hadoop user the job should run under. If not specified, it will default to the submit user.
public static final String USER_TO_PROXY = "user.to.proxy"; /** * Format string for Log4j's EnhancedPatternLayout */ public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout"; } public static class JobCallbackProperties { public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout"; public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout"; public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout"; public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout"; public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size"; } public static class FlowTriggerProps { // Flow trigger props public static final String SCHEDULE_TYPE = "type"; public static final String CRON_SCHEDULE_TYPE = "cron"; public static final String SCHEDULE_VALUE = "value"; public static final String DEP_NAME = "name"; // Flow trigger dependency run time props public static final String START_TIME = "startTime"; public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId"; } public static class PluginManager { public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes"; public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies"; // need jars.to.include property, will be loaded with user property public static final String CONFFILE = "plugin.properties"; // not exposed to users public static final String SYSCONFFILE = "private.properties"; // common properties for multiple plugins public static final String COMMONCONFFILE = "common.properties"; // common private properties for multiple plugins public static final String COMMONSYSCONFFILE = "commonprivate.properties"; // mapping for the jobType to default proxy user public static final String DEFAULT_PROXY_USERS_FILE = "default-proxy-users.properties"; // allowed jobType classes for default proxy user public static final String DEFAULT_PROXY_USERS_JOBTYPE_CLASSES = "default.proxyusers.jobtype" + ".classes"; // users not allowed as default proxy user public static final String DEFAULT_PROXY_USERS_FILTER = "default.proxyusers.filter"; } public static class ContainerizedDispatchManagerProperties { public static final String AZKABAN_CONTAINERIZED_PREFIX = "azkaban.containerized."; public static final String CONTAINERIZED_IMPL_TYPE = AZKABAN_CONTAINERIZED_PREFIX + "impl.type"; public static final String CONTAINERIZED_EXECUTION_BATCH_ENABLED = AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.enabled"; public static final String CONTAINERIZED_EXECUTION_BATCH_SIZE = AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.size"; public static final String CONTAINERIZED_EXECUTION_PROCESSING_THREAD_POOL_SIZE = AZKABAN_CONTAINERIZED_PREFIX + "execution.processing.thread.pool.size"; public static final String CONTAINERIZED_CREATION_RATE_LIMIT = AZKABAN_CONTAINERIZED_PREFIX + "creation.rate.limit"; public static final String CONTAINERIZED_RAMPUP = AZKABAN_CONTAINERIZED_PREFIX + "rampup"; public static final String CONTAINERIZED_JOBTYPE_ALLOWLIST = AZKABAN_CONTAINERIZED_PREFIX + "jobtype.allowlist"; public static final String CONTAINERIZED_PROXY_USER_DENYLIST = AZKABAN_CONTAINERIZED_PREFIX + "proxy.user.denylist"; // Kubernetes related properties public static final String AZKABAN_KUBERNETES_PREFIX = "azkaban.kubernetes."; public static final String KUBERNETES_NAMESPACE = AZKABAN_KUBERNETES_PREFIX + "namespace"; public static final String KUBERNETES_KUBE_CONFIG_PATH = 
AZKABAN_KUBERNETES_PREFIX + "kube.config.path"; // Kubernetes pod related properties public static final String KUBERNETES_POD_PREFIX = AZKABAN_KUBERNETES_PREFIX + "pod."; public static final String KUBERNETES_POD_NAME_PREFIX = KUBERNETES_POD_PREFIX + "name.prefix"; public static final String KUBERNETES_POD_AZKABAN_BASE_IMAGE_NAME = AZKABAN_KUBERNETES_PREFIX + "azkaban-base.image.name"; public static final String KUBERNETES_POD_AZKABAN_CONFIG_IMAGE_NAME = AZKABAN_KUBERNETES_PREFIX + "azkaban-config.image.name"; public static final String KUBERNETES_POD_SERVICE_ACCOUNT_TOKEN_AUTOMOUNT = KUBERNETES_POD_PREFIX + "service.account.token.automount"; // Kubernetes flow container related properties public static final String KUBERNETES_FLOW_CONTAINER_PREFIX = AZKABAN_KUBERNETES_PREFIX + "flow.container."; public static final String KUBERNETES_FLOW_CONTAINER_NAME = KUBERNETES_FLOW_CONTAINER_PREFIX + ".name"; public static final String KUBERNETES_FLOW_CONTAINER_CPU_LIMIT_MULTIPLIER = KUBERNETES_FLOW_CONTAINER_PREFIX + "cpu.limit.multiplier"; public static final String KUBERNETES_FLOW_CONTAINER_MAX_ALLOWED_CPU = KUBERNETES_FLOW_CONTAINER_PREFIX + "max.allowed.cpu"; public static final String KUBERNETES_FLOW_CONTAINER_CPU_REQUEST = KUBERNETES_FLOW_CONTAINER_PREFIX + "cpu.request"; public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT_MULTIPLIER = KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.limit.multiplier"; public static final String KUBERNETES_FLOW_CONTAINER_MAX_ALLOWED_MEMORY = KUBERNETES_FLOW_CONTAINER_PREFIX + "max.allowed.memory"; public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST = KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.request"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_NAME = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.name"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_VOLUME = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.volume"; public static final String KUBERNETES_FLOW_CONTAINER_SECRET_MOUNTPATH = KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.mountpath"; public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "init.jobtypes.mount.path"; public static final String KUBERNETES_MOUNT_PATH_FOR_JOBTYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "jobtypes.mount.path"; public static final String KUBERNETES_POD_TEMPLATE_PATH = KUBERNETES_POD_PREFIX + "template.path"; public static final String KUBERNETES_DEPENDENCY_TYPES = KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencyTypes"; public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_DEPENDENCIES = KUBERNETES_FLOW_CONTAINER_PREFIX + "init.dependencies.mount.path"; public static final String KUBERNETES_MOUNT_PATH_FOR_DEPENDENCIES = KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencies.mount.path"; // Kubernetes service related properties public static final String KUBERNETES_SERVICE_PREFIX = AZKABAN_KUBERNETES_PREFIX + "service."; public static final String KUBERNETES_SERVICE_REQUIRED = KUBERNETES_SERVICE_PREFIX + "required"; public static final String KUBERNETES_SERVICE_NAME_PREFIX = KUBERNETES_SERVICE_PREFIX + "name.prefix"; public static final String KUBERNETES_SERVICE_PORT = KUBERNETES_SERVICE_PREFIX + "port"; public static final String KUBERNETES_SERVICE_CREATION_TIMEOUT_MS = KUBERNETES_SERVICE_PREFIX + "creation.timeout.ms"; // Kubernetes Watch related properties public static final String KUBERNETES_WATCH_PREFIX = AZKABAN_KUBERNETES_PREFIX + "watch."; public static final String KUBERNETES_WATCH_ENABLED = KUBERNETES_WATCH_PREFIX + "enabled"; 
public static final String KUBERNETES_WATCH_EVENT_CACHE_MAX_ENTRIES = KUBERNETES_WATCH_PREFIX + "cache.max.entries"; // Periodicity of lookup and cleanup of stale executions. public static final String CONTAINERIZED_STALE_EXECUTION_CLEANUP_INTERVAL_MIN = AZKABAN_CONTAINERIZED_PREFIX + "stale.execution.cleanup.interval.min"; public static final String ENV_VERSION_SET_ID = "VERSION_SET_ID"; public static final String ENV_FLOW_EXECUTION_ID = "FLOW_EXECUTION_ID"; public static final String ENV_JAVA_ENABLE_DEBUG = "JAVA_ENABLE_DEBUG"; public static final String ENV_ENABLE_DEV_POD = "ENABLE_DEV_POD"; public static final String ENV_CPU_REQUEST = "CPU_REQUEST"; public static final String ENV_MEMORY_REQUEST = "MEMORY_REQUEST"; } public static class ImageMgmtConstants { public static final String IMAGE_TYPE = "imageType"; public static final String IMAGE_VERSION = "imageVersion"; public static final String VERSION_STATE = "versionState"; public static final String ID_KEY = "id"; public static final String IMAGE_RAMPUP_PLAN = "imageRampupPlan"; } public static class FlowParameters { // Constants for Flow parameters public static final String FLOW_PARAM_VERSION_SET_ID = "azkaban.version-set.id"; // Constant to enable java remote debug for Flow Container public static final String FLOW_PARAM_JAVA_ENABLE_DEBUG = "java.enable.debug"; // Constant to enable pod for developer testing public static final String FLOW_PARAM_ENABLE_DEV_POD = "enable.dev.pod"; // Constant to disable pod cleanup through the kubernetes watch public static final String FLOW_PARAM_DISABLE_POD_CLEANUP = "disable.pod.cleanup"; // Constant to dispatch execution to Containerization public static final String FLOW_PARAM_DISPATCH_EXECUTION_TO_CONTAINER = "dispatch.execution.to.container"; // Constant for cpu request for flow container public static final String FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST = "flow.container.cpu.request"; // Constant for memory request for flow container public static final String FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST = "flow.container.memory.request"; public static final String FLOW_PARAM_POD_ENV_VAR = "pod.env.var."; // Constant to allow test version to be passed as flow parameter. Passing test version will be // allowed for Azkaban ADMIN role only public static final String FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION = "allow.image.test.version"; public static final String FLOW_PARAM_ALLOW_RESTART_ON_EXECUTION_STOPPED = "allow.restart.on.execution.stopped"; } }
1
22,874
Can you please change this to azkaban.jobs.java.opts?
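For illustration only — the constant being renamed lives in the patch for this record (not shown here), so the field name below is hypothetical; only the suggested property key `azkaban.jobs.java.opts` comes from the comment:

```java
// Hypothetical field name; the review only concerns the property key string.
public static final String JOB_JAVA_OPTS = "azkaban.jobs.java.opts";
```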
azkaban-azkaban
java
@@ -66,11 +66,10 @@ function AcquisitionPieChart( { data, args, source } ) { let sourceMessage = ''; if ( source ) { - sourceMessage = sprintf( - /* translators: %1$s: URL to Analytics Module page in Site Kit Admin, %2$s: Analytics (Service Name) */ - __( 'Source: <a class="googlesitekit-cta-link googlesitekit-cta-link--external googlesitekit-cta-link--inherit" href="%1$s">%2$s</a>', 'google-site-kit' ), + sourceMessage = __( 'Source:', 'google-site-kit' ) + sprintf( + ' <a class="googlesitekit-cta-link googlesitekit-cta-link--external googlesitekit-cta-link--inherit" href="%1$s">%2$s</a>', getSiteKitAdminURL( 'googlesitekit-module-analytics' ), - _x( 'Analytics', 'Service name', 'google-site-kit' ), + _x( 'Analytics', 'Service name', 'google-site-kit' ) ); }
1
/**
 * AcquisitionPieChart component.
 *
 * Site Kit by Google, Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * External dependencies
 */
import PropTypes from 'prop-types';

/**
 * WordPress dependencies
 */
import { __, _x, sprintf } from '@wordpress/i18n';

/**
 * Internal dependencies
 */
import GoogleChart from '../../../../components/google-chart';
import { getSiteKitAdminURL, sanitizeHTML } from '../../../../util';
import { extractAnalyticsDataForTrafficChart } from '../../util';

const GOOGLE_CHART_PIE_SETTINGS = {
	chartArea: {
		width: '100%',
		height: '100%',
	},
	backgroundColor: 'transparent',
	height: 250,
	legend: {
		alignment: 'center',
		textStyle: {
			color: '#5b5b61',
			fontSize: 12,
		},
	},
	slices: {
		0: { color: '#178EC5' },
		1: { color: '#54B23B' },
		2: { color: '#EB5729' },
		3: { color: '#ECED33' },
		4: { color: '#34CBE3' },
		5: { color: '#82E88E' },
	},
	title: null,
	width: '100%',
};

function AcquisitionPieChart( { data, args, source } ) {
	if ( ! data ) {
		return null;
	}

	let sourceMessage = '';
	if ( source ) {
		sourceMessage = sprintf(
			/* translators: %1$s: URL to Analytics Module page in Site Kit Admin, %2$s: Analytics (Service Name) */
			__( 'Source: <a class="googlesitekit-cta-link googlesitekit-cta-link--external googlesitekit-cta-link--inherit" href="%1$s">%2$s</a>', 'google-site-kit' ),
			getSiteKitAdminURL( 'googlesitekit-module-analytics' ),
			_x( 'Analytics', 'Service name', 'google-site-kit' ),
		);
	}

	return (
		<div className="googlesitekit-chart googlesitekit-chart--pie">
			<GoogleChart
				data={ extractAnalyticsDataForTrafficChart( data, args.url ? 1 : 0 ) }
				options={ GOOGLE_CHART_PIE_SETTINGS }
				chartType="pie"
				id="overview-piechart"
				loadHeight={ 205 }
			/>
			{ source && (
				<div
					className="googlesitekit-chart__source"
					dangerouslySetInnerHTML={ sanitizeHTML( sourceMessage, {
						ALLOWED_TAGS: [ 'a' ],
						ALLOWED_ATTR: [ 'href', 'class' ],
					} ) }
				/>
			) }
		</div>
	);
}

AcquisitionPieChart.propTypes = {
	data: PropTypes.arrayOf( PropTypes.object ),
	args: PropTypes.shape( { url: PropTypes.string } ).isRequired,
	source: PropTypes.bool,
};

AcquisitionPieChart.defaultProps = {
	source: false,
};

export default AcquisitionPieChart;
1
32,232
Here is another concatenation that should be updated. Even though `Source:` and the link are essentially separate, the concatenation isn't RTL-friendly. I think this would be another good use for `createInterpolateElement`, so that we wouldn't need to include all of the class names in the translation string (or extract them to a placeholder). It would also let us use the `Link` component directly, rather than embedding its resulting HTML as a string, which will be a pain to maintain if we ever change the base `Link` component.
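A rough sketch of what that could look like. For illustration only: the `external`/`inherit` props on `Link` and the import locations are assumptions, not the final implementation; `createInterpolateElement` itself comes from `@wordpress/element`.

```js
// Assumes `Link` and `getSiteKitAdminURL` are importable as elsewhere in the plugin.
import { createInterpolateElement } from '@wordpress/element';
import { __ } from '@wordpress/i18n';

const sourceMessage = createInterpolateElement(
	/* translators: <a> wraps the Analytics service name and links to the module page. */
	__( 'Source: <a>Analytics</a>', 'google-site-kit' ),
	{
		a: (
			// Hypothetical props mirroring the current --external/--inherit classes.
			<Link
				href={ getSiteKitAdminURL( 'googlesitekit-module-analytics' ) }
				external
				inherit
			/>
		),
	}
);
```

Since `createInterpolateElement` returns a React element rather than an HTML string, the `sanitizeHTML`/`dangerouslySetInnerHTML` step could be dropped too, and future changes to `Link` would apply automatically.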
google-site-kit-wp
js
@@ -26,12 +26,15 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Infrastructure #endif } - public static Task<int> GetCancelledZeroTask() + public static Task<int> GetCancelledZeroTask(CancellationToken cancellationToken = default(CancellationToken)) { - // Task<int>.FromCanceled doesn't return Task<int> +#if NETSTANDARD1_3 + return Task.FromCanceled<int>(cancellationToken); +#else var tcs = new TaskCompletionSource<int>(); tcs.TrySetCanceled(); return tcs.Task; +#endif } } }
1
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.

using System.Threading;
using System.Threading.Tasks;

namespace Microsoft.AspNetCore.Server.Kestrel.Infrastructure
{
    public static class TaskUtilities
    {
#if NETSTANDARD1_3
        public static Task CompletedTask = Task.CompletedTask;
#else
        public static Task CompletedTask = Task.FromResult<object>(null);
#endif

        public static Task<int> ZeroTask = Task.FromResult(0);

        public static Task GetCancelledTask(CancellationToken cancellationToken)
        {
#if NETSTANDARD1_3
            return Task.FromCanceled(cancellationToken);
#else
            var tcs = new TaskCompletionSource<object>();
            tcs.TrySetCanceled();
            return tcs.Task;
#endif
        }

        public static Task<int> GetCancelledZeroTask()
        {
            // Task<int>.FromCanceled doesn't return Task<int>
            var tcs = new TaskCompletionSource<int>();
            tcs.TrySetCanceled();
            return tcs.Task;
        }
    }
}
1
8,668
I would add an overload `GetCancelledZeroTask()` which calls `GetCancelledZeroTask(CancellationToken.None)`. This is cleaner than requiring the caller to pass `default(CancellationToken)` or `CancellationToken.None`.
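A minimal sketch of the suggested shape, layered on the patched token-taking method above (and assuming its `= default(CancellationToken)` default is dropped, which is the intent of the comment):

```csharp
// Parameterless convenience overload; forwards to the token-taking version
// so callers never have to spell out CancellationToken.None themselves.
public static Task<int> GetCancelledZeroTask()
{
    return GetCancelledZeroTask(CancellationToken.None);
}
```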
aspnet-KestrelHttpServer
.cs
@@ -295,9 +295,13 @@ void BestPractices::PreCallRecordDestroyImage(VkDevice device, VkImage image, co } void BestPractices::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator) { - SWAPCHAIN_NODE* chain = GetSwapchainState(swapchain); - for (auto& image : chain->images) { - ReleaseImageUsageState(image.image_state->image()); + if (VK_NULL_HANDLE != swapchain) { + SWAPCHAIN_NODE* chain = GetSwapchainState(swapchain); + for (auto& image : chain->images) { + if (image.image_state) { + ReleaseImageUsageState(image.image_state->image()); + } + } } ValidationStateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator); }
1
/* Copyright (c) 2015-2021 The Khronos Group Inc. * Copyright (c) 2015-2021 Valve Corporation * Copyright (c) 2015-2021 LunarG, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Camden Stocker <[email protected]> */ #include "best_practices_validation.h" #include "layer_chassis_dispatch.h" #include "best_practices_error_enums.h" #include "shader_validation.h" #include "sync_utils.h" #include <string> #include <bitset> #include <memory> struct VendorSpecificInfo { EnableFlags vendor_id; std::string name; }; const std::map<BPVendorFlagBits, VendorSpecificInfo> kVendorInfo = { {kBPVendorArm, {vendor_specific_arm, "Arm"}}, }; bool BestPractices::VendorCheckEnabled(BPVendorFlags vendors) const { for (const auto& vendor : kVendorInfo) { if (vendors & vendor.first && enabled[vendor.second.vendor_id]) { return true; } } return false; } const char* VendorSpecificTag(BPVendorFlags vendors) { // Cache built vendor tags in a map static layer_data::unordered_map<BPVendorFlags, std::string> tag_map; auto res = tag_map.find(vendors); if (res == tag_map.end()) { // Build the vendor tag string std::stringstream vendor_tag; vendor_tag << "["; bool first_vendor = true; for (const auto& vendor : kVendorInfo) { if (vendors & vendor.first) { if (!first_vendor) { vendor_tag << ", "; } vendor_tag << vendor.second.name; first_vendor = false; } } vendor_tag << "]"; tag_map[vendors] = vendor_tag.str(); res = tag_map.find(vendors); } return res->second.c_str(); } const char* DepReasonToString(ExtDeprecationReason reason) { switch (reason) { case kExtPromoted: return "promoted to"; break; case kExtObsoleted: return "obsoleted by"; break; case kExtDeprecated: return "deprecated by"; break; default: return ""; break; } } bool BestPractices::ValidateDeprecatedExtensions(const char* api_name, const char* extension_name, uint32_t version, const char* vuid) const { bool skip = false; auto dep_info_it = deprecated_extensions.find(extension_name); if (dep_info_it != deprecated_extensions.end()) { auto dep_info = dep_info_it->second; if (((dep_info.target.compare("VK_VERSION_1_1") == 0) && (version >= VK_API_VERSION_1_1)) || ((dep_info.target.compare("VK_VERSION_1_2") == 0) && (version >= VK_API_VERSION_1_2))) { skip |= LogWarning(instance, vuid, "%s(): Attempting to enable deprecated extension %s, but this extension has been %s %s.", api_name, extension_name, DepReasonToString(dep_info.reason), (dep_info.target).c_str()); } else if (dep_info.target.find("VK_VERSION") == std::string::npos) { if (dep_info.target.length() == 0) { skip |= LogWarning(instance, vuid, "%s(): Attempting to enable deprecated extension %s, but this extension has been deprecated " "without replacement.", api_name, extension_name); } else { skip |= LogWarning(instance, vuid, "%s(): Attempting to enable deprecated extension %s, but this extension has been %s %s.", api_name, extension_name, DepReasonToString(dep_info.reason), (dep_info.target).c_str()); } } } return skip; } bool BestPractices::ValidateSpecialUseExtensions(const char* api_name, const char* 
extension_name, const char* vuid) const { bool skip = false; auto dep_info_it = special_use_extensions.find(extension_name); if (dep_info_it != special_use_extensions.end()) { auto special_uses = dep_info_it->second; std::string message("is intended to support the following uses: "); if (special_uses.find("cadsupport") != std::string::npos) { message.append("specialized functionality used by CAD/CAM applications, "); } if (special_uses.find("d3demulation") != std::string::npos) { message.append("D3D emulation layers, and applications ported from D3D, by adding functionality specific to D3D, "); } if (special_uses.find("devtools") != std::string::npos) { message.append(" developer tools such as capture-replay libraries, "); } if (special_uses.find("debugging") != std::string::npos) { message.append("use by applications when debugging, "); } if (special_uses.find("glemulation") != std::string::npos) { message.append( "OpenGL and/or OpenGL ES emulation layers, and applications ported from those APIs, by adding functionality " "specific to those APIs, "); } message.append("and it is strongly recommended that they be otherwise avoided"); skip |= LogWarning(instance, vuid, "%s(): Attempting to enable extension %s, but this extension %s.", api_name, extension_name, message.c_str()); } return skip; } bool BestPractices::PreCallValidateCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance) const { bool skip = false; for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { if (white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) { skip |= LogWarning(instance, kVUID_BestPractices_CreateInstance_ExtensionMismatch, "vkCreateInstance(): Attempting to enable Device Extension %s at CreateInstance time.", pCreateInfo->ppEnabledExtensionNames[i]); } uint32_t specified_version = (pCreateInfo->pApplicationInfo ? pCreateInfo->pApplicationInfo->apiVersion : VK_API_VERSION_1_0); skip |= ValidateDeprecatedExtensions("CreateInstance", pCreateInfo->ppEnabledExtensionNames[i], specified_version, kVUID_BestPractices_CreateInstance_DeprecatedExtension); skip |= ValidateSpecialUseExtensions("CreateInstance", pCreateInfo->ppEnabledExtensionNames[i], kVUID_BestPractices_CreateInstance_SpecialUseExtension); } return skip; } void BestPractices::PreCallRecordCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance) { ValidationStateTracker::PreCallRecordCreateInstance(pCreateInfo, pAllocator, pInstance); if (pCreateInfo != nullptr && pCreateInfo->pApplicationInfo != nullptr) { instance_api_version = pCreateInfo->pApplicationInfo->apiVersion; } else { instance_api_version = 0; } } bool BestPractices::PreCallValidateCreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice) const { bool skip = false; // get API version of physical device passed when creating device. VkPhysicalDeviceProperties physical_device_properties{}; DispatchGetPhysicalDeviceProperties(physicalDevice, &physical_device_properties); auto device_api_version = physical_device_properties.apiVersion; // check api versions and warn if instance api Version is higher than version on device. 
if (instance_api_version > device_api_version) { std::string inst_api_name = StringAPIVersion(instance_api_version); std::string dev_api_name = StringAPIVersion(device_api_version); skip |= LogWarning(device, kVUID_BestPractices_CreateDevice_API_Mismatch, "vkCreateDevice(): API Version of current instance, %s is higher than API Version on device, %s", inst_api_name.c_str(), dev_api_name.c_str()); } for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { if (white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) { skip |= LogWarning(instance, kVUID_BestPractices_CreateDevice_ExtensionMismatch, "vkCreateDevice(): Attempting to enable Instance Extension %s at CreateDevice time.", pCreateInfo->ppEnabledExtensionNames[i]); } skip |= ValidateDeprecatedExtensions("CreateDevice", pCreateInfo->ppEnabledExtensionNames[i], instance_api_version, kVUID_BestPractices_CreateDevice_DeprecatedExtension); skip |= ValidateSpecialUseExtensions("CreateInstance", pCreateInfo->ppEnabledExtensionNames[i], kVUID_BestPractices_CreateDevice_SpecialUseExtension); } const auto bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if ((bp_pd_state->vkGetPhysicalDeviceFeaturesState == UNCALLED) && (pCreateInfo->pEnabledFeatures != NULL)) { skip |= LogWarning(device, kVUID_BestPractices_CreateDevice_PDFeaturesNotCalled, "vkCreateDevice() called before getting physical device features from vkGetPhysicalDeviceFeatures()."); } if ((VendorCheckEnabled(kBPVendorArm)) && (pCreateInfo->pEnabledFeatures != nullptr) && (pCreateInfo->pEnabledFeatures->robustBufferAccess == VK_TRUE)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateDevice_RobustBufferAccess, "%s vkCreateDevice() called with enabled robustBufferAccess. Use robustBufferAccess as a debugging tool during " "development. Enabling it causes loss in performance for accesses to uniform buffers and shader storage " "buffers. Disable robustBufferAccess in release builds. 
Only leave it enabled if the application use-case " "requires the additional level of reliability due to the use of unverified user-supplied draw parameters.", VendorSpecificTag(kBPVendorArm)); } return skip; } bool BestPractices::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) const { bool skip = false; if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE)) { std::stringstream buffer_hex; buffer_hex << "0x" << std::hex << HandleToUint64(pBuffer); skip |= LogWarning( device, kVUID_BestPractices_SharingModeExclusive, "Warning: Buffer (%s) specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple queues " "(queueFamilyIndexCount of %" PRIu32 ").", buffer_hex.str().c_str(), pCreateInfo->queueFamilyIndexCount); } return skip; } bool BestPractices::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage) const { bool skip = false; if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->sharingMode == VK_SHARING_MODE_EXCLUSIVE)) { std::stringstream image_hex; image_hex << "0x" << std::hex << HandleToUint64(pImage); skip |= LogWarning(device, kVUID_BestPractices_SharingModeExclusive, "Warning: Image (%s) specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple queues " "(queueFamilyIndexCount of %" PRIu32 ").", image_hex.str().c_str(), pCreateInfo->queueFamilyIndexCount); } if (VendorCheckEnabled(kBPVendorArm)) { if (pCreateInfo->samples > kMaxEfficientSamplesArm) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateImage_TooLargeSampleCount, "%s vkCreateImage(): Trying to create an image with %u samples. " "The hardware revision may not have full throughput for framebuffers with more than %u samples.", VendorSpecificTag(kBPVendorArm), static_cast<uint32_t>(pCreateInfo->samples), kMaxEfficientSamplesArm); } if (pCreateInfo->samples > VK_SAMPLE_COUNT_1_BIT && !(pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateImage_NonTransientMSImage, "%s vkCreateImage(): Trying to create a multisampled image, but createInfo.usage did not have " "VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. Multisampled images may be resolved on-chip, " "and do not need to be backed by physical storage. 
" "TRANSIENT_ATTACHMENT allows tiled GPUs to not back the multisampled image with physical memory.", VendorSpecificTag(kBPVendorArm)); } } return skip; } void BestPractices::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) { ValidationStateTracker::PreCallRecordDestroyImage(device, image, pAllocator); ReleaseImageUsageState(image); } void BestPractices::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator) { SWAPCHAIN_NODE* chain = GetSwapchainState(swapchain); for (auto& image : chain->images) { ReleaseImageUsageState(image.image_state->image()); } ValidationStateTracker::PreCallRecordDestroySwapchainKHR(device, swapchain, pAllocator); } IMAGE_STATE_BP* BestPractices::GetImageUsageState(VkImage vk_image) { auto itr = imageUsageMap.find(vk_image); if (itr != imageUsageMap.end()) { return &itr->second; } else { auto& state = imageUsageMap[vk_image]; IMAGE_STATE* image = GetImageState(vk_image); state.image = image; state.usages.resize(image->createInfo.arrayLayers); for (auto& mips : state.usages) { mips.resize(image->createInfo.mipLevels, IMAGE_SUBRESOURCE_USAGE_BP::UNDEFINED); } return &state; } } void BestPractices::ReleaseImageUsageState(VkImage image) { auto itr = imageUsageMap.find(image); if (itr != imageUsageMap.end()) { imageUsageMap.erase(itr); } } bool BestPractices::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain) const { bool skip = false; const auto* bp_pd_state = GetPhysicalDeviceStateBP(); if (bp_pd_state) { if (bp_pd_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) { skip |= LogWarning(device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled, "vkCreateSwapchainKHR() called before getting surface capabilities from " "vkGetPhysicalDeviceSurfaceCapabilitiesKHR()."); } if ((pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) && (bp_pd_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS)) { skip |= LogWarning(device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled, "vkCreateSwapchainKHR() called before getting surface present mode(s) from " "vkGetPhysicalDeviceSurfacePresentModesKHR()."); } if (bp_pd_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) { skip |= LogWarning( device, kVUID_BestPractices_Swapchain_GetSurfaceNotCalled, "vkCreateSwapchainKHR() called before getting surface format(s) from vkGetPhysicalDeviceSurfaceFormatsKHR()."); } } if ((pCreateInfo->queueFamilyIndexCount > 1) && (pCreateInfo->imageSharingMode == VK_SHARING_MODE_EXCLUSIVE)) { skip |= LogWarning(device, kVUID_BestPractices_SharingModeExclusive, "Warning: A Swapchain is being created which specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while " "specifying multiple queues (queueFamilyIndexCount of %" PRIu32 ").", pCreateInfo->queueFamilyIndexCount); } if (pCreateInfo->minImageCount == 2) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_SuboptimalSwapchainImageCount, "Warning: A Swapchain is being created with minImageCount set to %" PRIu32 ", which means double buffering is going " "to be used. Using double buffering and vsync locks rendering to an integer fraction of the vsync rate. In turn, " "reducing the performance of the application if rendering is slower than vsync. 
Consider setting minImageCount to " "3 to use triple buffering to maximize performance in such cases.", pCreateInfo->minImageCount); } if (VendorCheckEnabled(kBPVendorArm) && (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR)) { skip |= LogWarning(device, kVUID_BestPractices_CreateSwapchain_PresentMode, "%s Warning: Swapchain is not being created with presentation mode \"VK_PRESENT_MODE_FIFO_KHR\". " "Prefer using \"VK_PRESENT_MODE_FIFO_KHR\" to avoid unnecessary CPU and GPU load and save power. " "Presentation modes which are not FIFO will present the latest available frame and discard other " "frame(s) if any.", VendorSpecificTag(kBPVendorArm)); } return skip; } bool BestPractices::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains) const { bool skip = false; for (uint32_t i = 0; i < swapchainCount; i++) { if ((pCreateInfos[i].queueFamilyIndexCount > 1) && (pCreateInfos[i].imageSharingMode == VK_SHARING_MODE_EXCLUSIVE)) { skip |= LogWarning( device, kVUID_BestPractices_SharingModeExclusive, "Warning: A shared swapchain (index %" PRIu32 ") is being created which specifies a sharing mode of VK_SHARING_MODE_EXCLUSIVE while specifying multiple " "queues (queueFamilyIndexCount of %" PRIu32 ").", i, pCreateInfos[i].queueFamilyIndexCount); } } return skip; } bool BestPractices::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass) const { bool skip = false; for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) { VkFormat format = pCreateInfo->pAttachments[i].format; if (pCreateInfo->pAttachments[i].initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) { if ((FormatIsColor(format) || FormatHasDepth(format)) && pCreateInfo->pAttachments[i].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_BestPractices_RenderPass_Attatchment, "Render pass has an attachment with loadOp == VK_ATTACHMENT_LOAD_OP_LOAD and " "initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you " "intended. Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the " "image truly is undefined at the start of the render pass."); } if (FormatHasStencil(format) && pCreateInfo->pAttachments[i].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogWarning(device, kVUID_BestPractices_RenderPass_Attatchment, "Render pass has an attachment with stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD " "and initialLayout == VK_IMAGE_LAYOUT_UNDEFINED. This is probably not what you " "intended. Consider using VK_ATTACHMENT_LOAD_OP_DONT_CARE instead if the " "image truly is undefined at the start of the render pass."); } } const auto& attachment = pCreateInfo->pAttachments[i]; if (attachment.samples > VK_SAMPLE_COUNT_1_BIT) { bool access_requires_memory = attachment.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD || attachment.storeOp == VK_ATTACHMENT_STORE_OP_STORE; if (FormatHasStencil(format)) { access_requires_memory |= attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD || attachment.stencilStoreOp == VK_ATTACHMENT_STORE_OP_STORE; } if (access_requires_memory) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateRenderPass_ImageRequiresMemory, "Attachment %u in the VkRenderPass is a multisampled image with %u samples, but it uses loadOp/storeOp " "which requires accessing data from memory.
Multisampled images should always be loadOp = CLEAR or DONT_CARE, " "storeOp = DONT_CARE. This allows the implementation to use lazily allocated memory effectively.", i, static_cast<uint32_t>(attachment.samples)); } } } for (uint32_t dependency = 0; dependency < pCreateInfo->dependencyCount; dependency++) { skip |= CheckPipelineStageFlags("vkCreateRenderPass", pCreateInfo->pDependencies[dependency].srcStageMask); skip |= CheckPipelineStageFlags("vkCreateRenderPass", pCreateInfo->pDependencies[dependency].dstStageMask); } return skip; } bool BestPractices::ValidateAttachments(const VkRenderPassCreateInfo2* rpci, uint32_t attachmentCount, const VkImageView* image_views) const { bool skip = false; // Check for non-transient attachments that should be transient and vice versa for (uint32_t i = 0; i < attachmentCount; ++i) { const auto& attachment = rpci->pAttachments[i]; bool attachment_should_be_transient = (attachment.loadOp != VK_ATTACHMENT_LOAD_OP_LOAD && attachment.storeOp != VK_ATTACHMENT_STORE_OP_STORE); if (FormatHasStencil(attachment.format)) { attachment_should_be_transient &= (attachment.stencilLoadOp != VK_ATTACHMENT_LOAD_OP_LOAD && attachment.stencilStoreOp != VK_ATTACHMENT_STORE_OP_STORE); } auto view_state = GetImageViewState(image_views[i]); if (view_state) { const auto& ivci = view_state->create_info; const auto& ici = GetImageState(ivci.image)->createInfo; bool image_is_transient = (ici.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) != 0; // The check for an image that should not be transient applies to all GPUs if (!attachment_should_be_transient && image_is_transient) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateFramebuffer_AttachmentShouldNotBeTransient, "Attachment %u in VkFramebuffer uses loadOp/storeOps which need to access physical memory, " "but the image backing the image view has VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. " "Physical memory will need to be backed lazily to this image, potentially causing stalls.", i); } bool supports_lazy = false; for (uint32_t j = 0; j < phys_dev_mem_props.memoryTypeCount; j++) { if (phys_dev_mem_props.memoryTypes[j].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) { supports_lazy = true; } } // The check for an image that should be transient only applies to GPUs supporting // lazily allocated memory if (supports_lazy && attachment_should_be_transient && !image_is_transient) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateFramebuffer_AttachmentShouldBeTransient, "Attachment %u in VkFramebuffer uses loadOp/storeOps which never have to be backed by physical memory, " "but the image backing the image view does not have VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set. 
" "You can save physical memory by using transient attachment backed by lazily allocated memory here.", i); } } } return skip; } bool BestPractices::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer) const { bool skip = false; auto rp_state = GetRenderPassState(pCreateInfo->renderPass); if (rp_state && !(pCreateInfo->flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT)) { skip = ValidateAttachments(rp_state->createInfo.ptr(), pCreateInfo->attachmentCount, pCreateInfo->pAttachments); } return skip; } bool BestPractices::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, void* ads_state_data) const { bool skip = false; skip |= ValidationStateTracker::PreCallValidateAllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets, ads_state_data); if (!skip) { const auto& pool_handle = pAllocateInfo->descriptorPool; auto iter = descriptor_pool_freed_count.find(pool_handle); // if the number of freed sets > 0, it implies they could be recycled instead if desirable // this warning is specific to Arm if (VendorCheckEnabled(kBPVendorArm) && iter != descriptor_pool_freed_count.end() && iter->second > 0) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_AllocateDescriptorSets_SuboptimalReuse, "%s Descriptor set memory was allocated via vkAllocateDescriptorSets() for sets which were previously freed in the " "same logical device. On some drivers or architectures it may be most optimal to re-use existing descriptor sets.", VendorSpecificTag(kBPVendorArm)); } } return skip; } void BestPractices::ManualPostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state) { if (result == VK_SUCCESS) { // find the free count for the pool we allocated into auto iter = descriptor_pool_freed_count.find(pAllocateInfo->descriptorPool); if (iter != descriptor_pool_freed_count.end()) { // we record successful allocations by subtracting the allocation count from the last recorded free count const auto alloc_count = pAllocateInfo->descriptorSetCount; // clamp the unsigned subtraction to the range [0, last_free_count] if (iter->second > alloc_count) { iter->second -= alloc_count; } else { iter->second = 0; } } } } void BestPractices::PostCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, VkResult result) { ValidationStateTracker::PostCallRecordFreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets, result); if (result == VK_SUCCESS) { // we want to track frees because we're interested in suggesting re-use auto iter = descriptor_pool_freed_count.find(descriptorPool); if (iter == descriptor_pool_freed_count.end()) { descriptor_pool_freed_count.emplace(descriptorPool, descriptorSetCount); } else { iter->second += descriptorSetCount; } } } bool BestPractices::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory) const { bool skip = false; if (num_mem_objects + 1 > kMemoryObjectWarningLimit) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_AllocateMemory_TooManyObjects, "Performance Warning: This app has > %" PRIu32 " memory objects.", kMemoryObjectWarningLimit); } if 
(pAllocateInfo->allocationSize < kMinDeviceAllocationSize) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_AllocateMemory_SmallAllocation, "vkAllocateMemory(): Allocating a VkDeviceMemory of size %" PRIu64 ". This is a very small allocation (current " "threshold is %" PRIu64 " bytes). " "You should make large allocations and sub-allocate from one large VkDeviceMemory.", pAllocateInfo->allocationSize, kMinDeviceAllocationSize); } // TODO: Insert get check for GetPhysicalDeviceMemoryProperties once the state is tracked in the StateTracker return skip; } void BestPractices::ManualPostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory, VkResult result) { if (result != VK_SUCCESS) { static std::vector<VkResult> error_codes = {VK_ERROR_OUT_OF_HOST_MEMORY, VK_ERROR_OUT_OF_DEVICE_MEMORY, VK_ERROR_TOO_MANY_OBJECTS, VK_ERROR_INVALID_EXTERNAL_HANDLE, VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS}; static std::vector<VkResult> success_codes = {}; ValidateReturnCodes("vkAllocateMemory", result, error_codes, success_codes); return; } num_mem_objects++; } void BestPractices::ValidateReturnCodes(const char* api_name, VkResult result, const std::vector<VkResult>& error_codes, const std::vector<VkResult>& success_codes) const { auto error = std::find(error_codes.begin(), error_codes.end(), result); if (error != error_codes.end()) { static const std::vector<VkResult> common_failure_codes = {VK_ERROR_OUT_OF_DATE_KHR, VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT}; auto common_failure = std::find(common_failure_codes.begin(), common_failure_codes.end(), result); if (common_failure != common_failure_codes.end()) { LogInfo(instance, kVUID_BestPractices_Failure_Result, "%s(): Returned error %s.", api_name, string_VkResult(result)); } else { LogWarning(instance, kVUID_BestPractices_Error_Result, "%s(): Returned error %s.", api_name, string_VkResult(result)); } return; } auto success = std::find(success_codes.begin(), success_codes.end(), result); if (success != success_codes.end()) { LogInfo(instance, kVUID_BestPractices_NonSuccess_Result, "%s(): Returned non-success return code %s.", api_name, string_VkResult(result)); } } bool BestPractices::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator) const { if (memory == VK_NULL_HANDLE) return false; bool skip = false; const DEVICE_MEMORY_STATE* mem_info = ValidationStateTracker::GetDevMemState(memory); for (const auto& node: mem_info->ObjectBindings()) { const auto& obj = node->Handle(); LogObjectList objlist(device); objlist.add(obj); objlist.add(mem_info->mem()); skip |= LogWarning(objlist, layer_name.c_str(), "VK Object %s still has a reference to mem obj %s.", report_data->FormatHandle(obj).c_str(), report_data->FormatHandle(mem_info->mem()).c_str()); } return skip; } void BestPractices::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator) { ValidationStateTracker::PreCallRecordFreeMemory(device, memory, pAllocator); if (memory != VK_NULL_HANDLE) { num_mem_objects--; } } bool BestPractices::ValidateBindBufferMemory(VkBuffer buffer, VkDeviceMemory memory, const char* api_name) const { bool skip = false; const BUFFER_STATE* buffer_state = GetBufferState(buffer); if (!buffer_state->memory_requirements_checked && !buffer_state->external_memory_handle) { skip |= LogWarning(device, kVUID_BestPractices_BufferMemReqNotCalled, "%s: Binding memory to %s but 
vkGetBufferMemoryRequirements() has not been called on that buffer.", api_name, report_data->FormatHandle(buffer).c_str()); } const DEVICE_MEMORY_STATE* mem_state = GetDevMemState(memory); if (mem_state->alloc_info.allocationSize == buffer_state->createInfo.size && mem_state->alloc_info.allocationSize < kMinDedicatedAllocationSize) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_SmallDedicatedAllocation, "%s: Trying to bind %s to a memory block which is fully consumed by the buffer. " "The required size of the allocation is %" PRIu64 ", but smaller buffers like this should be sub-allocated from " "larger memory blocks. (Current threshold is %" PRIu64 " bytes.)", api_name, report_data->FormatHandle(buffer).c_str(), mem_state->alloc_info.allocationSize, kMinDedicatedAllocationSize); } return skip; } bool BestPractices::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset) const { bool skip = false; const char* api_name = "vkBindBufferMemory()"; skip |= ValidateBindBufferMemory(buffer, memory, api_name); return skip; } bool BestPractices::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, api_name); } return skip; } bool BestPractices::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindBufferMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindBufferMemory(pBindInfos[i].buffer, pBindInfos[i].memory, api_name); } return skip; } bool BestPractices::ValidateBindImageMemory(VkImage image, VkDeviceMemory memory, const char* api_name) const { bool skip = false; const IMAGE_STATE* image_state = GetImageState(image); if (image_state->disjoint == false) { if (!image_state->memory_requirements_checked && !image_state->external_memory_handle) { skip |= LogWarning(device, kVUID_BestPractices_ImageMemReqNotCalled, "%s: Binding memory to %s but vkGetImageMemoryRequirements() has not been called on that image.", api_name, report_data->FormatHandle(image).c_str()); } } else { // TODO If binding disjoint image then this needs to check that VkImagePlaneMemoryRequirementsInfo was called for each // plane. } const DEVICE_MEMORY_STATE* mem_state = GetDevMemState(memory); if (mem_state->alloc_info.allocationSize == image_state->requirements.size && mem_state->alloc_info.allocationSize < kMinDedicatedAllocationSize) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_SmallDedicatedAllocation, "%s: Trying to bind %s to a memory block which is fully consumed by the image. " "The required size of the allocation is %" PRIu64 ", but smaller images like this should be sub-allocated from " "larger memory blocks. (Current threshold is %" PRIu64 " bytes.)", api_name, report_data->FormatHandle(image).c_str(), mem_state->alloc_info.allocationSize, kMinDedicatedAllocationSize); } // If we're binding memory to an image which was created as TRANSIENT and the image supports LAZY allocation, // make sure this type is actually used.
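// For reference, a minimal sketch of the allocation pattern this check encourages; ChooseLazyMemoryType is illustrative only and is not part of this layer or the Vulkan API: // uint32_t ChooseLazyMemoryType(const VkPhysicalDeviceMemoryProperties& props, const VkMemoryRequirements& reqs, uint32_t fallback) { // for (uint32_t i = 0; i < props.memoryTypeCount; i++) { // const bool allowed = (reqs.memoryTypeBits & (1u << i)) != 0; // const bool lazy = (props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0; // if (allowed && lazy) return i; // prefer lazily allocated memory for TRANSIENT images // } // return fallback; // no lazily allocated type on this platform (common outside tile-based GPUs) // }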
// This warning will only trigger if this layer is run on a platform that supports LAZILY_ALLOCATED_BIT // (i.e. most tile-based renderers) if (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) { bool supports_lazy = false; uint32_t suggested_type = 0; for (uint32_t i = 0; i < phys_dev_mem_props.memoryTypeCount; i++) { if ((1u << i) & image_state->requirements.memoryTypeBits) { if (phys_dev_mem_props.memoryTypes[i].propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) { supports_lazy = true; suggested_type = i; break; } } } uint32_t allocated_properties = phys_dev_mem_props.memoryTypes[mem_state->alloc_info.memoryTypeIndex].propertyFlags; if (supports_lazy && (allocated_properties & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) == 0) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_NonLazyTransientImage, "%s: Attempting to bind memory type %u to VkImage which was created with TRANSIENT_ATTACHMENT_BIT, " "but this memory type is not LAZILY_ALLOCATED_BIT. You should use memory type %u here instead to save " "%" PRIu64 " bytes of physical memory.", api_name, mem_state->alloc_info.memoryTypeIndex, suggested_type, image_state->requirements.size); } } return skip; } bool BestPractices::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset) const { bool skip = false; const char* api_name = "vkBindImageMemory()"; skip |= ValidateBindImageMemory(image, memory, api_name); return skip; } bool BestPractices::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2() pBindInfos[%u]", i); if (!LvlFindInChain<VkBindImageMemorySwapchainInfoKHR>(pBindInfos[i].pNext)) { skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, api_name); } } return skip; } bool BestPractices::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos) const { char api_name[64]; bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { sprintf(api_name, "vkBindImageMemory2KHR() pBindInfos[%u]", i); skip |= ValidateBindImageMemory(pBindInfos[i].image, pBindInfos[i].memory, api_name); } return skip; } static inline bool FormatHasFullThroughputBlendingArm(VkFormat format) { switch (format) { case VK_FORMAT_B10G11R11_UFLOAT_PACK32: case VK_FORMAT_R16_SFLOAT: case VK_FORMAT_R16G16_SFLOAT: case VK_FORMAT_R16G16B16_SFLOAT: case VK_FORMAT_R16G16B16A16_SFLOAT: case VK_FORMAT_R32_SFLOAT: case VK_FORMAT_R32G32_SFLOAT: case VK_FORMAT_R32G32B32_SFLOAT: case VK_FORMAT_R32G32B32A32_SFLOAT: return false; default: return true; } } bool BestPractices::ValidateMultisampledBlendingArm(uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos) const { bool skip = false; for (uint32_t i = 0; i < createInfoCount; i++) { auto create_info = &pCreateInfos[i]; if (!create_info->pColorBlendState || !create_info->pMultisampleState || create_info->pMultisampleState->rasterizationSamples == VK_SAMPLE_COUNT_1_BIT || create_info->pMultisampleState->sampleShadingEnable) { // nothing to check for this pipeline; move on rather than returning early, so the remaining pipelines are still validated continue; } auto rp_state = GetRenderPassState(create_info->renderPass); const auto& subpass = rp_state->createInfo.pSubpasses[create_info->subpass]; for (uint32_t j = 0; j < create_info->pColorBlendState->attachmentCount; j++) { const auto& blend_att = create_info->pColorBlendState->pAttachments[j]; uint32_t att =
subpass.pColorAttachments[j].attachment; if (att != VK_ATTACHMENT_UNUSED && blend_att.blendEnable && blend_att.colorWriteMask) { if (!FormatHasFullThroughputBlendingArm(rp_state->createInfo.pAttachments[att].format)) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CreatePipelines_MultisampledBlending, "%s vkCreateGraphicsPipelines() - createInfo #%u: Pipeline is multisampled and " "color attachment #%u makes use " "of a format which cannot be blended at full throughput when using MSAA.", VendorSpecificTag(kBPVendorArm), i, j); } } } } return skip; } bool BestPractices::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, cgpl_state_data); create_graphics_pipeline_api_state* cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state*>(cgpl_state_data); if ((createInfoCount > 1) && (!pipelineCache)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreatePipelines_MultiplePipelines, "Performance Warning: This vkCreateGraphicsPipelines call is creating multiple pipelines but is not using a " "pipeline cache, which may help with performance"); } for (uint32_t i = 0; i < createInfoCount; i++) { const auto& create_info = pCreateInfos[i]; if (!(cgpl_state->pipe_state[i]->active_shaders & VK_SHADER_STAGE_MESH_BIT_NV)) { const auto& vertex_input = *create_info.pVertexInputState; uint32_t count = 0; for (uint32_t j = 0; j < vertex_input.vertexBindingDescriptionCount; j++) { if (vertex_input.pVertexBindingDescriptions[j].inputRate == VK_VERTEX_INPUT_RATE_INSTANCE) { count++; } } if (count > kMaxInstancedVertexBuffers) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreatePipelines_TooManyInstancedVertexBuffers, "The pipeline is using %u instanced vertex buffers (current limit: %u), but this can be inefficient on the " "GPU. If using instanced vertex attributes prefer interleaving them in a single buffer.", count, kMaxInstancedVertexBuffers); } } if ((pCreateInfos[i].pRasterizationState->depthBiasEnable) && (pCreateInfos[i].pRasterizationState->depthBiasConstantFactor == 0.0f) && (pCreateInfos[i].pRasterizationState->depthBiasSlopeFactor == 0.0f) && VendorCheckEnabled(kBPVendorArm)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreatePipelines_DepthBias_Zero, "%s Performance Warning: This vkCreateGraphicsPipelines call is created with depthBiasEnable set to true " "and both depthBiasConstantFactor and depthBiasSlopeFactor are set to 0. This can cause reduced " "efficiency during rasterization. 
Consider disabling depthBias or increasing either " "depthBiasConstantFactor or depthBiasSlopeFactor.", VendorSpecificTag(kBPVendorArm)); } } // ValidateMultisampledBlendingArm() already iterates over all createInfoCount pipelines itself, so invoke it once after the loop rather than once per pipeline (which would duplicate its warnings). skip |= VendorCheckEnabled(kBPVendorArm) && ValidateMultisampledBlendingArm(createInfoCount, pCreateInfos); return skip; } void BestPractices::ManualPostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result, void* cgpl_state_data) { for (size_t i = 0; i < count; i++) { const auto* cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state*>(cgpl_state_data); const VkPipeline pipeline_handle = pPipelines[i]; // record depth stencil state and color blend states for depth pre-pass tracking purposes auto gp_cis = graphicsPipelineCIs.find(pipeline_handle); // add the tracking state if it doesn't exist (insert_result avoids shadowing the VkResult parameter) if (gp_cis == graphicsPipelineCIs.end()) { auto insert_result = graphicsPipelineCIs.emplace(pipeline_handle, GraphicsPipelineCIs{}); if (!insert_result.second) continue; gp_cis = insert_result.first; } gp_cis->second.colorBlendStateCI = cgpl_state->pCreateInfos[i].pColorBlendState ? new safe_VkPipelineColorBlendStateCreateInfo(cgpl_state->pCreateInfos[i].pColorBlendState) : nullptr; gp_cis->second.depthStencilStateCI = cgpl_state->pCreateInfos[i].pDepthStencilState ? new safe_VkPipelineDepthStencilStateCreateInfo(cgpl_state->pCreateInfos[i].pDepthStencilState) : nullptr; } } bool BestPractices::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* ccpl_state_data) const { bool skip = StateTracker::PreCallValidateCreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, ccpl_state_data); if ((createInfoCount > 1) && (!pipelineCache)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreatePipelines_MultiplePipelines, "Performance Warning: This vkCreateComputePipelines call is creating multiple pipelines but is not using a " "pipeline cache, which may help with performance"); } if (VendorCheckEnabled(kBPVendorArm)) { for (size_t i = 0; i < createInfoCount; i++) { skip |= ValidateCreateComputePipelineArm(pCreateInfos[i]); } } return skip; } bool BestPractices::ValidateCreateComputePipelineArm(const VkComputePipelineCreateInfo& createInfo) const { bool skip = false; auto* module = GetShaderModuleState(createInfo.stage.module); // Generate warnings about work group sizes based on active resources. auto entrypoint = module->FindEntrypoint(createInfo.stage.pName, createInfo.stage.stage); if (entrypoint == module->end()) return false; uint32_t x = 1, y = 1, z = 1; module->FindLocalSize(entrypoint, x, y, z); uint32_t thread_count = x * y * z; // Generate a priori warnings about work group sizes. if (thread_count > kMaxEfficientWorkGroupThreadCountArm) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateComputePipelines_ComputeWorkGroupSize, "%s vkCreateComputePipelines(): compute shader with work group dimensions (%u, %u, " "%u) (%u threads total), has more threads than advised in a single work group.
It is advised to use work " "groups with fewer than %u threads, especially when using barrier() or shared memory.", VendorSpecificTag(kBPVendorArm), x, y, z, thread_count, kMaxEfficientWorkGroupThreadCountArm); } if (thread_count == 1 || ((x > 1) && (x & (kThreadGroupDispatchCountAlignmentArm - 1))) || ((y > 1) && (y & (kThreadGroupDispatchCountAlignmentArm - 1))) || ((z > 1) && (z & (kThreadGroupDispatchCountAlignmentArm - 1)))) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CreateComputePipelines_ComputeThreadGroupAlignment, "%s vkCreateComputePipelines(): compute shader with work group dimensions (%u, " "%u, %u) is not aligned to %u " "threads. On Arm Mali architectures, not aligning work group sizes to %u may " "leave threads idle on the shader " "core.", VendorSpecificTag(kBPVendorArm), x, y, z, kThreadGroupDispatchCountAlignmentArm, kThreadGroupDispatchCountAlignmentArm); } bool has_writeable_descriptors = false; bool has_atomic_descriptors = false; auto accessible_ids = module->MarkAccessibleIds(entrypoint); auto descriptor_uses = module->CollectInterfaceByDescriptorSlot(accessible_ids, &has_writeable_descriptors, &has_atomic_descriptors); unsigned dimensions = 0; if (x > 1) dimensions++; if (y > 1) dimensions++; if (z > 1) dimensions++; // Here the dimension will really depend on the dispatch grid, but assume it's 1D. dimensions = std::max(dimensions, 1u); // If we're accessing images, we almost certainly want to have a 2D workgroup for cache reasons. // There are some false positives here. We could simply have a shader that does this within a 1D grid, // or we may have a linearly tiled image, but these cases are quite unlikely in practice. bool accesses_2d = false; for (const auto& usage : descriptor_uses) { auto dim = module->GetShaderResourceDimensionality(usage.second); if (dim < 0) continue; auto spvdim = spv::Dim(dim); if (spvdim != spv::Dim1D && spvdim != spv::DimBuffer) accesses_2d = true; } if (accesses_2d && dimensions < 2) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CreateComputePipelines_ComputeSpatialLocality, "%s vkCreateComputePipelines(): compute shader has work group dimensions (%u, %u, %u), which " "suggests a 1D dispatch, but the shader is accessing 2D or 3D images.
The shader may be " "exhibiting poor spatial locality with respect to one or more shader resources.", VendorSpecificTag(kBPVendorArm), x, y, z); } return skip; } bool BestPractices::CheckPipelineStageFlags(const std::string& api_name, VkPipelineStageFlags flags) const { bool skip = false; if (flags & VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT) { skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags, "You are using VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT when %s is called\n", api_name.c_str()); } else if (flags & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) { skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags, "You are using VK_PIPELINE_STAGE_ALL_COMMANDS_BIT when %s is called\n", api_name.c_str()); } return skip; } bool BestPractices::CheckPipelineStageFlags(const std::string& api_name, VkPipelineStageFlags2KHR flags) const { bool skip = false; if (flags & VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR) { skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags, "You are using VK_PIPELINE_STAGE_2_ALL_GRAPHICS_BIT_KHR when %s is called\n", api_name.c_str()); } else if (flags & VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR) { skip |= LogWarning(device, kVUID_BestPractices_PipelineStageFlags, "You are using VK_PIPELINE_STAGE_2_ALL_COMMANDS_BIT_KHR when %s is called\n", api_name.c_str()); } return skip; } bool BestPractices::CheckDependencyInfo(const std::string& api_name, const VkDependencyInfoKHR& dep_info) const { bool skip = false; auto stage_masks = sync_utils::GetGlobalStageMasks(dep_info); skip |= CheckPipelineStageFlags(api_name, stage_masks.src); skip |= CheckPipelineStageFlags(api_name, stage_masks.dst); return skip; } void BestPractices::ManualPostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo, VkResult result) { for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) { auto swapchains_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result; if (swapchains_result == VK_SUBOPTIMAL_KHR) { LogPerformanceWarning( pPresentInfo->pSwapchains[i], kVUID_BestPractices_SuboptimalSwapchain, "vkQueuePresentKHR: %s :VK_SUBOPTIMAL_KHR was returned. VK_SUBOPTIMAL_KHR - Presentation will still succeed, " "subject to the window resize behavior, but the swapchain is no longer configured optimally for the surface it " "targets. 
Applications should query updated surface information and recreate their swapchain at the next " "convenient opportunity.", report_data->FormatHandle(pPresentInfo->pSwapchains[i]).c_str()); } } } bool BestPractices::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) const { bool skip = false; for (uint32_t submit = 0; submit < submitCount; submit++) { for (uint32_t semaphore = 0; semaphore < pSubmits[submit].waitSemaphoreCount; semaphore++) { skip |= CheckPipelineStageFlags("vkQueueSubmit", pSubmits[submit].pWaitDstStageMask[semaphore]); } } return skip; } bool BestPractices::PreCallValidateQueueSubmit2KHR(VkQueue queue, uint32_t submitCount, const VkSubmitInfo2KHR* pSubmits, VkFence fence) const { bool skip = false; for (uint32_t submit = 0; submit < submitCount; submit++) { for (uint32_t semaphore = 0; semaphore < pSubmits[submit].waitSemaphoreInfoCount; semaphore++) { skip |= CheckPipelineStageFlags("vkQueueSubmit2KHR", pSubmits[submit].pWaitSemaphoreInfos[semaphore].stageMask); } } return skip; } bool BestPractices::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool) const { bool skip = false; if (pCreateInfo->flags & VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateCommandPool_CommandBufferReset, "vkCreateCommandPool(): VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT is set. Consider resetting entire " "pool instead."); } return skip; } bool BestPractices::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo) const { bool skip = false; if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_BeginCommandBuffer_SimultaneousUse, "vkBeginCommandBuffer(): VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT is set."); } if (!(pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && VendorCheckEnabled(kBPVendorArm)) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_BeginCommandBuffer_OneTimeSubmit, "%s vkBeginCommandBuffer(): VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT is not set. 
" "For best performance on Mali GPUs, consider setting ONE_TIME_SUBMIT by default.", VendorSpecificTag(kBPVendorArm)); } return skip; } bool BestPractices::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdSetEvent", stageMask); return skip; } bool BestPractices::PreCallValidateCmdSetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, const VkDependencyInfoKHR* pDependencyInfo) const { return CheckDependencyInfo("vkCmdSetEvent2KHR", *pDependencyInfo); } bool BestPractices::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdResetEvent", stageMask); return skip; } bool BestPractices::PreCallValidateCmdResetEvent2KHR(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags2KHR stageMask) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdResetEvent2KHR", stageMask); return skip; } bool BestPractices::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdWaitEvents", srcStageMask); skip |= CheckPipelineStageFlags("vkCmdWaitEvents", dstStageMask); return skip; } bool BestPractices::PreCallValidateCmdWaitEvents2KHR(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, const VkDependencyInfoKHR* pDependencyInfos) const { bool skip = false; for (uint32_t i = 0; i < eventCount; i++) { skip = CheckDependencyInfo("vkCmdWaitEvents2KHR", pDependencyInfos[i]); } return skip; } bool BestPractices::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdPipelineBarrier", srcStageMask); skip |= CheckPipelineStageFlags("vkCmdPipelineBarrier", dstStageMask); return skip; } bool BestPractices::PreCallValidateCmdPipelineBarrier2KHR(VkCommandBuffer commandBuffer, const VkDependencyInfoKHR* pDependencyInfo) const { return CheckDependencyInfo("vkCmdPipelineBarrier2KHR", *pDependencyInfo); } bool BestPractices::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdWriteTimestamp", static_cast<VkPipelineStageFlags>(pipelineStage)); return skip; } bool BestPractices::PreCallValidateCmdWriteTimestamp2KHR(VkCommandBuffer commandBuffer, VkPipelineStageFlags2KHR pipelineStage, VkQueryPool queryPool, uint32_t query) const { bool skip = false; skip |= CheckPipelineStageFlags("vkCmdWriteTimestamp2KHR", pipelineStage); return skip; } void BestPractices::PostCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, 
VkPipeline pipeline) { StateTracker::PostCallRecordCmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline); if (pipelineBindPoint == VK_PIPELINE_BIND_POINT_GRAPHICS) { // check for depth/blend state tracking auto gp_cis = graphicsPipelineCIs.find(pipeline); if (gp_cis != graphicsPipelineCIs.end()) { auto prepass_state = cbDepthPrePassStates.find(commandBuffer); if (prepass_state == cbDepthPrePassStates.end()) { auto result = cbDepthPrePassStates.emplace(commandBuffer, DepthPrePassState{}); if (!result.second) return; prepass_state = result.first; } const auto* blend_state = gp_cis->second.colorBlendStateCI; const auto* stencil_state = gp_cis->second.depthStencilStateCI; if (blend_state) { // assume the pipeline is depth-only unless any of the attachments have color writes enabled prepass_state->second.depthOnly = true; for (size_t i = 0; i < blend_state->attachmentCount; i++) { if (blend_state->pAttachments[i].colorWriteMask != 0) { prepass_state->second.depthOnly = false; } } } // check for depth value usage prepass_state->second.depthEqualComparison = false; if (stencil_state && stencil_state->depthTestEnable) { switch (stencil_state->depthCompareOp) { case VK_COMPARE_OP_EQUAL: case VK_COMPARE_OP_GREATER_OR_EQUAL: case VK_COMPARE_OP_LESS_OR_EQUAL: prepass_state->second.depthEqualComparison = true; break; default: break; } } } else { // reset depth pre-pass tracking cbDepthPrePassStates.emplace(commandBuffer, DepthPrePassState{}); } } } static inline bool RenderPassUsesAttachmentOnTile(const safe_VkRenderPassCreateInfo2& createInfo, uint32_t attachment) { for (uint32_t subpass = 0; subpass < createInfo.subpassCount; subpass++) { const auto& subpass_info = createInfo.pSubpasses[subpass]; // If an attachment is ever used as a color attachment, // resolve attachment or depth stencil attachment, // it needs to exist on tile at some point. 
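// Note: an attachment referenced only as an input attachment is deliberately not counted here; that case is classified by RenderPassUsesAttachmentAsImageOnly() below.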
for (uint32_t i = 0; i < subpass_info.colorAttachmentCount; i++) { if (subpass_info.pColorAttachments[i].attachment == attachment) return true; } if (subpass_info.pResolveAttachments) { for (uint32_t i = 0; i < subpass_info.colorAttachmentCount; i++) { if (subpass_info.pResolveAttachments[i].attachment == attachment) return true; } } if (subpass_info.pDepthStencilAttachment && subpass_info.pDepthStencilAttachment->attachment == attachment) return true; } return false; } static inline bool RenderPassUsesAttachmentAsImageOnly(const safe_VkRenderPassCreateInfo2& createInfo, uint32_t attachment) { if (RenderPassUsesAttachmentOnTile(createInfo, attachment)) { return false; } for (uint32_t subpass = 0; subpass < createInfo.subpassCount; subpass++) { const auto& subpassInfo = createInfo.pSubpasses[subpass]; for (uint32_t i = 0; i < subpassInfo.inputAttachmentCount; i++) { if (subpassInfo.pInputAttachments[i].attachment == attachment) { return true; } } } return false; } bool BestPractices::ValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo* pRenderPassBegin) const { bool skip = false; if (!pRenderPassBegin) { return skip; } auto rp_state = GetRenderPassState(pRenderPassBegin->renderPass); if (rp_state) { if (rp_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) { const VkRenderPassAttachmentBeginInfo* rpabi = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBegin->pNext); if (rpabi) { skip = ValidateAttachments(rp_state->createInfo.ptr(), rpabi->attachmentCount, rpabi->pAttachments); } } // Check if any attachments have LOAD operation on them for (uint32_t att = 0; att < rp_state->createInfo.attachmentCount; att++) { const auto& attachment = rp_state->createInfo.pAttachments[att]; bool attachment_has_readback = false; if (!FormatHasStencil(attachment.format) && attachment.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { attachment_has_readback = true; } if (FormatHasStencil(attachment.format) && attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { attachment_has_readback = true; } bool attachment_needs_readback = false; // Check if the attachment is actually used in any subpass on-tile if (attachment_has_readback && RenderPassUsesAttachmentOnTile(rp_state->createInfo, att)) { attachment_needs_readback = true; } // Using LOAD_OP_LOAD is expensive on tiled GPUs, so flag it as a potential improvement if (attachment_needs_readback && VendorCheckEnabled(kBPVendorArm)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_BeginRenderPass_AttachmentNeedsReadback, "%s Attachment #%u in render pass has begun with VK_ATTACHMENT_LOAD_OP_LOAD.\n" "Submitting this renderpass will cause the driver to inject a readback of the attachment " "which will copy in total %u pixels (renderArea = { %d, %d, %u, %u }) to the tile buffer.", VendorSpecificTag(kBPVendorArm), att, pRenderPassBegin->renderArea.extent.width * pRenderPassBegin->renderArea.extent.height, pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width, pRenderPassBegin->renderArea.extent.height); } } } return skip; } void BestPractices::QueueValidateImageView(QueueCallbacks &funcs, const char* function_name, IMAGE_VIEW_STATE* view, IMAGE_SUBRESOURCE_USAGE_BP usage) { if (view) { QueueValidateImage(funcs, function_name, GetImageUsageState(view->create_info.image), usage, view->create_info.subresourceRange); } } void BestPractices::QueueValidateImage(QueueCallbacks &funcs, const char* function_name, 
IMAGE_STATE_BP* state, IMAGE_SUBRESOURCE_USAGE_BP usage, const VkImageSubresourceRange& subresource_range) { IMAGE_STATE* image = state->image; // If we're viewing a 3D slice, ignore base array layer. // The entire 3D subresource is accessed as one atomic unit. const uint32_t base_array_layer = image->createInfo.imageType == VK_IMAGE_TYPE_3D ? 0 : subresource_range.baseArrayLayer; const uint32_t max_layers = image->createInfo.arrayLayers - base_array_layer; const uint32_t array_layers = std::min(subresource_range.layerCount, max_layers); const uint32_t max_levels = image->createInfo.mipLevels - subresource_range.baseMipLevel; // Clamp levelCount (which may be VK_REMAINING_MIP_LEVELS) to the image's level range, mirroring the layer handling above. const uint32_t mip_levels = std::min(subresource_range.levelCount, max_levels); for (uint32_t layer = 0; layer < array_layers; layer++) { for (uint32_t level = 0; level < mip_levels; level++) { QueueValidateImage(funcs, function_name, state, usage, layer + base_array_layer, level + subresource_range.baseMipLevel); } } } void BestPractices::QueueValidateImage(QueueCallbacks &funcs, const char* function_name, IMAGE_STATE_BP* state, IMAGE_SUBRESOURCE_USAGE_BP usage, const VkImageSubresourceLayers& subresource_layers) { IMAGE_STATE* image = state->image; const uint32_t max_layers = image->createInfo.arrayLayers - subresource_layers.baseArrayLayer; const uint32_t array_layers = std::min(subresource_layers.layerCount, max_layers); for (uint32_t layer = 0; layer < array_layers; layer++) { QueueValidateImage(funcs, function_name, state, usage, layer + subresource_layers.baseArrayLayer, subresource_layers.mipLevel); } } void BestPractices::QueueValidateImage(QueueCallbacks &funcs, const char* function_name, IMAGE_STATE_BP* state, IMAGE_SUBRESOURCE_USAGE_BP usage, uint32_t array_layer, uint32_t mip_level) { funcs.push_back([this, function_name, state, usage, array_layer, mip_level](const ValidationStateTracker*, const QUEUE_STATE*) -> bool { ValidateImageInQueue(function_name, state, usage, array_layer, mip_level); return false; }); } void BestPractices::ValidateImageInQueueArm(const char* function_name, IMAGE_STATE* image, IMAGE_SUBRESOURCE_USAGE_BP last_usage, IMAGE_SUBRESOURCE_USAGE_BP usage, uint32_t array_layer, uint32_t mip_level) { // Swapchain images are implicitly read so clear after store is expected. if (usage == IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_CLEARED && last_usage == IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_STORED && !image->is_swapchain_image) { LogPerformanceWarning( device, kVUID_BestPractices_RenderPass_RedundantStore, "%s: %s Subresource (arrayLayer: %u, mipLevel: %u) of image was cleared as part of LOAD_OP_CLEAR, but last time " "image was used, it was written to with STORE_OP_STORE. " "Storing to the image is probably redundant in this case, and wastes bandwidth on tile-based " "architectures.", function_name, VendorSpecificTag(kBPVendorArm), array_layer, mip_level); } else if (usage == IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_CLEARED && last_usage == IMAGE_SUBRESOURCE_USAGE_BP::CLEARED) { LogPerformanceWarning( device, kVUID_BestPractices_RenderPass_RedundantClear, "%s: %s Subresource (arrayLayer: %u, mipLevel: %u) of image was cleared as part of LOAD_OP_CLEAR, but last time " "image was used, it was written to with vkCmdClear*Image(). " "Clearing the image with vkCmdClear*Image() is probably redundant in this case, and wastes bandwidth on " "tile-based architectures.",
"architectures.", function_name, VendorSpecificTag(kBPVendorArm), array_layer, mip_level); } else if (usage == IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_READ_TO_TILE && (last_usage == IMAGE_SUBRESOURCE_USAGE_BP::BLIT_WRITE || last_usage == IMAGE_SUBRESOURCE_USAGE_BP::CLEARED || last_usage == IMAGE_SUBRESOURCE_USAGE_BP::COPY_WRITE || last_usage == IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_WRITE)) { const char *last_cmd = nullptr; const char *vuid = nullptr; const char *suggestion = nullptr; switch (last_usage) { case IMAGE_SUBRESOURCE_USAGE_BP::BLIT_WRITE: vuid = kVUID_BestPractices_RenderPass_BlitImage_LoadOpLoad; last_cmd = "vkCmdBlitImage"; suggestion = "The blit is probably redundant in this case, and wastes bandwidth on tile-based architectures. " "Rather than blitting, just render the source image in a fragment shader in this render pass, " "which avoids the memory roundtrip."; break; case IMAGE_SUBRESOURCE_USAGE_BP::CLEARED: vuid = kVUID_BestPractices_RenderPass_InefficientClear; last_cmd = "vkCmdClear*Image"; suggestion = "Clearing the image with vkCmdClear*Image() is probably redundant in this case, and wastes bandwidth on " "tile-based architectures. " "Use LOAD_OP_CLEAR instead to clear the image for free."; break; case IMAGE_SUBRESOURCE_USAGE_BP::COPY_WRITE: vuid = kVUID_BestPractices_RenderPass_CopyImage_LoadOpLoad; last_cmd = "vkCmdCopy*Image"; suggestion = "The copy is probably redundant in this case, and wastes bandwidth on tile-based architectures. " "Rather than copying, just render the source image in a fragment shader in this render pass, " "which avoids the memory roundtrip."; break; case IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_WRITE: vuid = kVUID_BestPractices_RenderPass_ResolveImage_LoadOpLoad; last_cmd = "vkCmdResolveImage"; suggestion = "The resolve is probably redundant in this case, and wastes a lot of bandwidth on tile-based architectures. " "Rather than resolving, and then loading, try to keep rendering in the same render pass, " "which avoids the memory roundtrip."; break; default: break; } LogPerformanceWarning( device, vuid, "%s: %s Subresource (arrayLayer: %u, mipLevel: %u) of image was loaded to tile as part of LOAD_OP_LOAD, but last " "time image was used, it was written to with %s. 
%s", function_name, VendorSpecificTag(kBPVendorArm), array_layer, mip_level, last_cmd, suggestion); } } void BestPractices::ValidateImageInQueue(const char* function_name, IMAGE_STATE_BP* state, IMAGE_SUBRESOURCE_USAGE_BP usage, uint32_t array_layer, uint32_t mip_level) { IMAGE_STATE* image = state->image; IMAGE_SUBRESOURCE_USAGE_BP last_usage = state->usages[array_layer][mip_level]; state->usages[array_layer][mip_level] = usage; if (VendorCheckEnabled(kBPVendorArm)) { ValidateImageInQueueArm(function_name, image, last_usage, usage, array_layer, mip_level); } } void BestPractices::AddDeferredQueueOperations(CMD_BUFFER_STATE* cb) { cb->queue_submit_functions.insert(cb->queue_submit_functions.end(), queue_submit_functions_after_render_pass.begin(), queue_submit_functions_after_render_pass.end()); queue_submit_functions_after_render_pass.clear(); } void BestPractices::PreCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) { ValidationStateTracker::PreCallRecordCmdEndRenderPass(commandBuffer); AddDeferredQueueOperations(GetCBState(commandBuffer)); } void BestPractices::PreCallRecordCmdEndRenderPass2(VkCommandBuffer commandBuffer, const VkSubpassEndInfo *pSubpassInfo) { ValidationStateTracker::PreCallRecordCmdEndRenderPass2(commandBuffer, pSubpassInfo); AddDeferredQueueOperations(GetCBState(commandBuffer)); } void BestPractices::PreCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassInfo) { ValidationStateTracker::PreCallRecordCmdEndRenderPass2KHR(commandBuffer, pSubpassInfo); AddDeferredQueueOperations(GetCBState(commandBuffer)); } void BestPractices::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents) { ValidationStateTracker::PreCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); if (!pRenderPassBegin) { return; } CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto rp_state = GetRenderPassState(pRenderPassBegin->renderPass); if (rp_state) { // Check load ops for (uint32_t att = 0; att < rp_state->createInfo.attachmentCount; att++) { const auto& attachment = rp_state->createInfo.pAttachments[att]; if (!RenderPassUsesAttachmentAsImageOnly(rp_state->createInfo, att) && !RenderPassUsesAttachmentOnTile(rp_state->createInfo, att)) { continue; } IMAGE_SUBRESOURCE_USAGE_BP usage = IMAGE_SUBRESOURCE_USAGE_BP::UNDEFINED; if ((!FormatIsStencilOnly(attachment.format) && attachment.loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) || (FormatHasStencil(attachment.format) && attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD)) { usage = IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_READ_TO_TILE; } else if ((!FormatIsStencilOnly(attachment.format) && attachment.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) || (FormatHasStencil(attachment.format) && attachment.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR)) { usage = IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_CLEARED; } else if (RenderPassUsesAttachmentAsImageOnly(rp_state->createInfo, att)) { usage = IMAGE_SUBRESOURCE_USAGE_BP::DESCRIPTOR_ACCESS; } auto framebuffer = GetFramebufferState(pRenderPassBegin->framebuffer); IMAGE_VIEW_STATE* image_view = nullptr; if (rp_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) { const VkRenderPassAttachmentBeginInfo* rpabi = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBegin->pNext); if (rpabi) { image_view = GetImageViewState(rpabi->pAttachments[att]); } } else { image_view = GetImageViewState(framebuffer->createInfo.pAttachments[att]); } 
QueueValidateImageView(cb->queue_submit_functions, "vkCmdBeginRenderPass()", image_view, usage); } // Check store ops for (uint32_t att = 0; att < rp_state->createInfo.attachmentCount; att++) { const auto& attachment = rp_state->createInfo.pAttachments[att]; if (!RenderPassUsesAttachmentOnTile(rp_state->createInfo, att)) { continue; } IMAGE_SUBRESOURCE_USAGE_BP usage = IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_DISCARDED; if ((!FormatIsStencilOnly(attachment.format) && attachment.storeOp == VK_ATTACHMENT_STORE_OP_STORE) || (FormatHasStencil(attachment.format) && attachment.stencilStoreOp == VK_ATTACHMENT_STORE_OP_STORE)) { usage = IMAGE_SUBRESOURCE_USAGE_BP::RENDER_PASS_STORED; } auto framebuffer = GetFramebufferState(pRenderPassBegin->framebuffer); IMAGE_VIEW_STATE* image_view = nullptr; if (rp_state->createInfo.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) { const VkRenderPassAttachmentBeginInfo* rpabi = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBegin->pNext); if (rpabi) { image_view = GetImageViewState(rpabi->pAttachments[att]); } } else { image_view = GetImageViewState(framebuffer->createInfo.pAttachments[att]); } QueueValidateImageView(queue_submit_functions_after_render_pass, "vkCmdEndRenderPass()", image_view, usage); } } } bool BestPractices::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents) const { bool skip = StateTracker::PreCallValidateCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); skip |= ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin); return skip; } bool BestPractices::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo) const { bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); skip |= ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } bool BestPractices::PreCallValidateCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo) const { bool skip = StateTracker::PreCallValidateCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); skip |= ValidateCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); return skip; } void BestPractices::RecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo* pRenderPassBegin) { auto prepass_state = cbDepthPrePassStates.find(commandBuffer); // add the tracking state if it doesn't exist if (prepass_state == cbDepthPrePassStates.end()) { auto result = cbDepthPrePassStates.emplace(commandBuffer, DepthPrePassState{}); if (!result.second) return; prepass_state = result.first; } // reset the renderpass state prepass_state->second = {}; const auto* rp_state = GetRenderPassState(pRenderPassBegin->renderPass); // track depth / color attachment usage within the renderpass for (size_t i = 0; i < rp_state->createInfo.subpassCount; i++) { // record if depth/color attachments are in use for this renderpass if (rp_state->createInfo.pSubpasses[i].pDepthStencilAttachment != nullptr) prepass_state->second.depthAttachment = true; if (rp_state->createInfo.pSubpasses[i].colorAttachmentCount > 0) prepass_state->second.colorAttachment = true; } } void BestPractices::PostCallRecordCmdBeginRenderPass(VkCommandBuffer 
commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents) { StateTracker::PostCallRecordCmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents); RecordCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin); } void BestPractices::PostCallRecordCmdBeginRenderPass2(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo) { StateTracker::PostCallRecordCmdBeginRenderPass2(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); } void BestPractices::PostCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo) { StateTracker::PostCallRecordCmdBeginRenderPass2KHR(commandBuffer, pRenderPassBegin, pSubpassBeginInfo); RecordCmdBeginRenderPass(commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin); } // Generic function to handle validation for all CmdDraw* type functions bool BestPractices::ValidateCmdDrawType(VkCommandBuffer cmd_buffer, const char* caller) const { bool skip = false; const CMD_BUFFER_STATE* cb_state = GetCBState(cmd_buffer); if (cb_state) { const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto* pipeline_state = cb_state->lastBound[lv_bind_point].pipeline_state; const auto& current_vtx_bfr_binding_info = cb_state->current_vertex_buffer_binding_info.vertex_buffer_bindings; // Verify vertex binding if (pipeline_state->vertex_binding_descriptions_.empty()) { if ((!current_vtx_bfr_binding_info.empty()) && (!cb_state->vertex_buffer_used)) { skip |= LogPerformanceWarning(cb_state->commandBuffer(), kVUID_BestPractices_DrawState_VtxIndexOutOfBounds, "Vertex buffers are bound to %s but no vertex buffers are attached to %s.", report_data->FormatHandle(cb_state->commandBuffer()).c_str(), report_data->FormatHandle(pipeline_state->pipeline()).c_str()); } } } return skip; } void BestPractices::RecordCmdDrawType(VkCommandBuffer cmd_buffer, uint32_t draw_count, const char* caller) { if (VendorCheckEnabled(kBPVendorArm)) { RecordCmdDrawTypeArm(cmd_buffer, draw_count, caller); } } void BestPractices::RecordCmdDrawTypeArm(VkCommandBuffer cmd_buffer, uint32_t draw_count, const char* caller) { auto prepass_state = cbDepthPrePassStates.find(cmd_buffer); if (prepass_state != cbDepthPrePassStates.end() && draw_count >= kDepthPrePassMinDrawCountArm) { if (prepass_state->second.depthOnly) prepass_state->second.numDrawCallsDepthOnly++; if (prepass_state->second.depthEqualComparison) prepass_state->second.numDrawCallsDepthEqualCompare++; } } bool BestPractices::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) const { bool skip = false; if (instanceCount == 0) { skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_InstanceCountZero, "Warning: You are calling vkCmdDraw() with an instanceCount of Zero."); } // Run the generic draw-state checks unconditionally, matching vkCmdDrawIndexed() below. skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDraw()"); return skip; } void BestPractices::PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) { StateTracker::PostCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance); RecordCmdDrawType(commandBuffer, vertexCount * instanceCount, "vkCmdDraw()"); } bool
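// vkCmdDrawIndexed: warn on an instanceCount of zero and, on Arm, flag long runs of small indexed draws as well as sparse or cache-unfriendly index buffers (see ValidateIndexBufferArm() below).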
BestPractices::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const { bool skip = false; if (instanceCount == 0) { skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_InstanceCountZero, "Warning: You are calling vkCmdDrawIndexed() with an instanceCount of Zero."); } skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexed()"); // Check if we reached the limit for small indexed draw calls. // Note that we cannot update the draw call count here, so we do it in PreCallRecordCmdDrawIndexed. const CMD_BUFFER_STATE* cmd_state = GetCBState(commandBuffer); if ((indexCount * instanceCount) <= kSmallIndexedDrawcallIndices && (cmd_state->small_indexed_draw_call_count == kMaxSmallIndexedDrawcalls - 1) && VendorCheckEnabled(kBPVendorArm)) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CmdDrawIndexed_ManySmallIndexedDrawcalls, "%s: The command buffer contains many small indexed drawcalls " "(at least %u drawcalls with fewer than %u indices each). This may cause pipeline bubbles. " "You can try batching drawcalls or instancing when applicable.", VendorSpecificTag(kBPVendorArm), kMaxSmallIndexedDrawcalls, kSmallIndexedDrawcallIndices); } if (VendorCheckEnabled(kBPVendorArm)) { skip |= ValidateIndexBufferArm(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); } return skip; } bool BestPractices::ValidateIndexBufferArm(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const { bool skip = false; // check for sparse/underutilised index buffer, and post-transform cache thrashing const auto* cmd_state = GetCBState(commandBuffer); if (cmd_state == nullptr) return skip; const auto* ib_state = cmd_state->index_buffer_binding.buffer_state.get(); if (ib_state == nullptr || cmd_state->index_buffer_binding.buffer_state->Destroyed()) return skip; const VkIndexType ib_type = cmd_state->index_buffer_binding.index_type; const auto& ib_mem_state = *ib_state->binding.mem_state; const VkDeviceSize ib_mem_offset = ib_mem_state.mapped_range.offset; const void* ib_mem = ib_mem_state.p_driver_data; bool primitive_restart_enable = false; const auto lv_bind_point = ConvertToLvlBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS); const auto& pipeline_binding_iter = cmd_state->lastBound[lv_bind_point]; const auto* pipeline_state = pipeline_binding_iter.pipeline_state; if (pipeline_state != nullptr && pipeline_state->graphicsPipelineCI.pInputAssemblyState != nullptr) { primitive_restart_enable = pipeline_state->graphicsPipelineCI.pInputAssemblyState->primitiveRestartEnable == VK_TRUE; } // no point checking index buffer if the memory is nonexistent/unmapped, or if there is no graphics pipeline bound to this CB if (ib_mem && pipeline_binding_iter.IsUsing()) { uint32_t scan_stride; if (ib_type == VK_INDEX_TYPE_UINT8_EXT) { scan_stride = sizeof(uint8_t); } else if (ib_type == VK_INDEX_TYPE_UINT16) { scan_stride = sizeof(uint16_t); } else { scan_stride = sizeof(uint32_t); } const uint8_t* scan_begin = static_cast<const uint8_t*>(ib_mem) + ib_mem_offset + firstIndex * scan_stride; const uint8_t* scan_end = scan_begin + indexCount * scan_stride; // Min and max are important to track for some Mali architectures.
In older Mali devices without IDVS, all // vertices corresponding to indices between the minimum and maximum may be loaded, and possibly shaded, // irrespective of whether or not they're part of the draw call. // start with minimum as 0xFFFFFFFF and adjust to indices in the buffer uint32_t min_index = ~0u; // start with maximum as 0 and adjust to indices in the buffer uint32_t max_index = 0u; // first scan-through, we're looking to simulate a model LRU post-transform cache, estimating the number of vertices shaded // for the given index buffer uint32_t vertex_shade_count = 0; PostTransformLRUCacheModel post_transform_cache; // The size of the cache being modelled positively correlates with how much behaviour it can capture about // arbitrary ground-truth hardware/architecture cache behaviour. I.e. it's a good solution when we don't know the // target architecture. // However, modelling a post-transform cache with more than 32 elements gives diminishing returns in practice. // http://eelpi.gotdns.org/papers/fast_vert_cache_opt.html post_transform_cache.resize(32); for (const uint8_t* scan_ptr = scan_begin; scan_ptr < scan_end; scan_ptr += scan_stride) { uint32_t scan_index; uint32_t primitive_restart_value; if (ib_type == VK_INDEX_TYPE_UINT8_EXT) { scan_index = *reinterpret_cast<const uint8_t*>(scan_ptr); primitive_restart_value = 0xFF; } else if (ib_type == VK_INDEX_TYPE_UINT16) { scan_index = *reinterpret_cast<const uint16_t*>(scan_ptr); primitive_restart_value = 0xFFFF; } else { scan_index = *reinterpret_cast<const uint32_t*>(scan_ptr); primitive_restart_value = 0xFFFFFFFF; } max_index = std::max(max_index, scan_index); min_index = std::min(min_index, scan_index); if (!primitive_restart_enable || scan_index != primitive_restart_value) { bool in_cache = post_transform_cache.query_cache(scan_index); // if the shaded vertex corresponding to the index is not in the PT-cache, we need to shade again if (!in_cache) vertex_shade_count++; } } // if the max and min values were not set, then we either have no indices, or all primitive restarts, exit... // if the max and min are the same, then it implies all the indices are the same, then we don't need to do anything if (max_index < min_index || max_index == min_index) return skip; if (max_index - min_index >= indexCount) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CmdDrawIndexed_SparseIndexBuffer, "%s The indices which were specified for the draw call only utilise approximately %.02f%% of " "index buffer value range. Arm Mali architectures before G71 do not have IDVS (Index-Driven " "Vertex Shading), meaning all vertices corresponding to indices between the minimum and " "maximum would be loaded, and possibly shaded, whether or not they are used.", VendorSpecificTag(kBPVendorArm), (static_cast<float>(indexCount) / static_cast<float>(max_index - min_index)) * 100.0f); return skip; } // use a dynamic vector of bitsets as a memory-compact representation of which indices are included in the draw call // each bit of the n-th bucket contains the inclusion information for indices (n*n_buckets) to ((n+1)*n_buckets) const size_t refs_per_bucket = 64; std::vector<std::bitset<refs_per_bucket>> vertex_reference_buckets; const uint32_t n_indices = max_index - min_index + 1; const uint32_t n_buckets = (n_indices / static_cast<uint32_t>(refs_per_bucket)) + ((n_indices % static_cast<uint32_t>(refs_per_bucket)) != 0 ? 
1 : 0); // there needs to be at least one bitset to store a set of indices smaller than n_buckets vertex_reference_buckets.resize(std::max(1u, n_buckets)); // To avoid using too much memory, we run over the indices again. // Knowing the size from the last scan allows us to record index usage with bitsets for (const uint8_t* scan_ptr = scan_begin; scan_ptr < scan_end; scan_ptr += scan_stride) { uint32_t scan_index; if (ib_type == VK_INDEX_TYPE_UINT8_EXT) { scan_index = *reinterpret_cast<const uint8_t*>(scan_ptr); } else if (ib_type == VK_INDEX_TYPE_UINT16) { scan_index = *reinterpret_cast<const uint16_t*>(scan_ptr); } else { scan_index = *reinterpret_cast<const uint32_t*>(scan_ptr); } // keep track of the set of all indices used to reference vertices in the draw call size_t index_offset = scan_index - min_index; size_t bitset_bucket_index = index_offset / refs_per_bucket; uint64_t used_indices = 1ull << ((index_offset % refs_per_bucket) & 0xFFFFFFFFu); vertex_reference_buckets[bitset_bucket_index] |= used_indices; } uint32_t vertex_reference_count = 0; for (const auto& bitset : vertex_reference_buckets) { vertex_reference_count += static_cast<uint32_t>(bitset.count()); } // low index buffer utilization implies that: of the vertices available to the draw call, not all are utilized float utilization = static_cast<float>(vertex_reference_count) / static_cast<float>(max_index - min_index + 1); // a low hit rate (high miss rate) implies the ordering of indices in the draw call could likely be improved float cache_hit_rate = static_cast<float>(vertex_reference_count) / static_cast<float>(vertex_shade_count); if (utilization < 0.5f) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CmdDrawIndexed_SparseIndexBuffer, "%s The indices which were specified for the draw call only utilise approximately " "%.02f%% of the bound vertex buffer.", VendorSpecificTag(kBPVendorArm), utilization * 100.0f); } if (cache_hit_rate <= 0.5f) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CmdDrawIndexed_PostTransformCacheThrashing, "%s The indices which were specified for the draw call are estimated to cause thrashing of " "the post-transform vertex cache, with a hit-rate of %.02f%%. " "I.e. 
the ordering of the index buffer may not make optimal use of indices associated with " "recently shaded vertices.", VendorSpecificTag(kBPVendorArm), cache_hit_rate * 100.0f); } } return skip; } void BestPractices::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) { ValidationStateTracker::PreCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); CMD_BUFFER_STATE* cmd_state = GetCBState(commandBuffer); if ((indexCount * instanceCount) <= kSmallIndexedDrawcallIndices) { cmd_state->small_indexed_draw_call_count++; } ValidateBoundDescriptorSets(commandBuffer, "vkCmdDrawIndexed()"); } void BestPractices::PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) { StateTracker::PostCallRecordCmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance); RecordCmdDrawType(commandBuffer, indexCount * instanceCount, "vkCmdDrawIndexed()"); } bool BestPractices::PreCallValidateCmdDrawIndexedIndirectCount(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { bool skip = ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexedIndirectCount()"); return skip; } bool BestPractices::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) const { bool skip = ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexedIndirectCountKHR()"); return skip; } bool BestPractices::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const { bool skip = false; if (drawCount == 0) { skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_DrawCountZero, "Warning: You are calling vkCmdDrawIndirect() with a drawCount of Zero."); } // draw-type validation applies to every indirect draw, not only those with a zero drawCount skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndirect()"); return skip; } void BestPractices::PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { StateTracker::PostCallRecordCmdDrawIndirect(commandBuffer, buffer, offset, count, stride); RecordCmdDrawType(commandBuffer, count, "vkCmdDrawIndirect()"); } bool BestPractices::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) const { bool skip = false; if (drawCount == 0) { skip |= LogWarning(device, kVUID_BestPractices_CmdDraw_DrawCountZero, "Warning: You are calling vkCmdDrawIndexedIndirect() with a drawCount of Zero."); } // draw-type validation applies to every indirect draw, not only those with a zero drawCount skip |= ValidateCmdDrawType(commandBuffer, "vkCmdDrawIndexedIndirect()"); return skip; } void BestPractices::PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) { StateTracker::PostCallRecordCmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride); RecordCmdDrawType(commandBuffer, count, "vkCmdDrawIndexedIndirect()"); } void BestPractices::ValidateBoundDescriptorSets(VkCommandBuffer commandBuffer, const char* function_name) { CMD_BUFFER_STATE* cb_state = GetCBState(commandBuffer); if (cb_state) { 
for (auto descriptor_set : cb_state->validated_descriptor_sets) { const auto& layout = *descriptor_set->GetLayout(); for (uint32_t index = 0; index < descriptor_set->GetBindingCount(); ++index) { // For bindless scenarios, we should not attempt to track descriptor set state. // It is highly uncertain which resources are actually bound. // Resources which are written to such a descriptor should be marked as indeterminate w.r.t. state. VkDescriptorBindingFlags flags = layout.GetDescriptorBindingFlagsFromIndex(index); if (flags & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT)) { continue; } auto index_range = layout.GetGlobalIndexRangeFromIndex(index); for (uint32_t i = index_range.start; i < index_range.end; ++i) { VkImageView image_view{VK_NULL_HANDLE}; auto descriptor = descriptor_set->GetDescriptorFromGlobalIndex(i); switch (descriptor->GetClass()) { case cvdescriptorset::DescriptorClass::Image: { if (const auto image_descriptor = static_cast<const cvdescriptorset::ImageDescriptor*>(descriptor)) { image_view = image_descriptor->GetImageView(); } break; } case cvdescriptorset::DescriptorClass::ImageSampler: { if (const auto image_sampler_descriptor = static_cast<const cvdescriptorset::ImageSamplerDescriptor*>(descriptor)) { image_view = image_sampler_descriptor->GetImageView(); } break; } default: break; } if (image_view) { IMAGE_VIEW_STATE* image_view_state = GetImageViewState(image_view); QueueValidateImageView(cb_state->queue_submit_functions, function_name, image_view_state, IMAGE_SUBRESOURCE_USAGE_BP::DESCRIPTOR_ACCESS); } } } } } } void BestPractices::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) { ValidateBoundDescriptorSets(commandBuffer, "vkCmdDraw()"); } void BestPractices::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) { ValidateBoundDescriptorSets(commandBuffer, "vkCmdDrawIndirect()"); } void BestPractices::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride) { ValidateBoundDescriptorSets(commandBuffer, "vkCmdDrawIndexedIndirect()"); } bool BestPractices::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ) const { bool skip = false; if ((groupCountX == 0) || (groupCountY == 0) || (groupCountZ == 0)) { skip |= LogWarning(device, kVUID_BestPractices_CmdDispatch_GroupCountZero, "Warning: You are calling vkCmdDispatch() while one or more groupCounts are zero (groupCountX = %" PRIu32 ", groupCountY = %" PRIu32 ", groupCountZ = %" PRIu32 ").", groupCountX, groupCountY, groupCountZ); } return skip; } bool BestPractices::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) const { bool skip = false; skip |= StateTracker::PreCallValidateCmdEndRenderPass(commandBuffer); auto prepass_state = cbDepthPrePassStates.find(commandBuffer); if (prepass_state == cbDepthPrePassStates.end()) return skip; bool uses_depth = (prepass_state->second.depthAttachment || prepass_state->second.colorAttachment) && prepass_state->second.numDrawCallsDepthEqualCompare >= kDepthPrePassNumDrawCallsArm && prepass_state->second.numDrawCallsDepthOnly >= kDepthPrePassNumDrawCallsArm; if (uses_depth) { skip |= LogPerformanceWarning( device, 
kVUID_BestPractices_EndRenderPass_DepthPrePassUsage, "%s Depth pre-passes may be in use. In general, this is not recommended, as in Arm Mali GPUs since " "Mali-T620, Forward Pixel Killing (FPK) can already perform automatic hidden surface removal; in which " "case, using depth pre-passes for hidden surface removal may worsen performance.", VendorSpecificTag(kBPVendorArm)); } return skip; } void BestPractices::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) { ValidateBoundDescriptorSets(commandBuffer, "vkCmdDispatch()"); } void BestPractices::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) { ValidateBoundDescriptorSets(commandBuffer, "vkCmdDispatchIndirect()"); } bool BestPractices::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(VkPhysicalDevice physicalDevice, const char* api_name) const { bool skip = false; const auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { if (bp_pd_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) { skip |= LogWarning(physicalDevice, kVUID_BestPractices_DisplayPlane_PropertiesNotCalled, "Potential problem with calling %s() without first retrieving properties from " "vkGetPhysicalDeviceDisplayPlanePropertiesKHR or vkGetPhysicalDeviceDisplayPlaneProperties2KHR.", api_name); } } return skip; } bool BestPractices::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneSupportedDisplaysKHR"); return skip; } bool BestPractices::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneCapabilitiesKHR"); return skip; } bool BestPractices::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities) const { bool skip = false; skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(physicalDevice, "vkGetDisplayPlaneCapabilities2KHR"); return skip; } bool BestPractices::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages) const { bool skip = false; auto swapchain_state_itr = swapchain_bp_state_map.find(swapchain); if ((swapchain_state_itr != swapchain_bp_state_map.cend()) && pSwapchainImages) { // Compare the preliminary value of *pSwapchainImageCount with the value this time: if (swapchain_state_itr->second.vkGetSwapchainImagesKHRState == UNCALLED) { skip |= LogWarning(device, kVUID_Core_Swapchain_PriorCount, "vkGetSwapchainImagesKHR() called with non-NULL pSwapchainImageCount; but no prior positive value has " "been seen for pSwapchainImages."); } } return skip; } // Common function to handle validation for GetPhysicalDeviceQueueFamilyProperties & 2KHR version bool BestPractices::ValidateCommonGetPhysicalDeviceQueueFamilyProperties(const PHYSICAL_DEVICE_STATE* pd_state, uint32_t requested_queue_family_property_count, const CALL_STATE call_state, const char* caller_name) const { bool skip = false; // Verify that for each physical device, this command is called 
first with NULL pQueueFamilyProperties in order to get count if (UNCALLED == call_state) { skip |= LogWarning( pd_state->phys_device, kVUID_Core_DevLimit_MissingQueryCount, "%s is called with non-NULL pQueueFamilyProperties before obtaining pQueueFamilyPropertyCount. It is " "recommended " "to first call %s with NULL pQueueFamilyProperties in order to obtain the maximal pQueueFamilyPropertyCount.", caller_name, caller_name); // Then verify that pCount that is passed in on second call matches what was returned } else if (pd_state->queue_family_known_count != requested_queue_family_property_count) { skip |= LogWarning(pd_state->phys_device, kVUID_Core_DevLimit_CountMismatch, "%s is called with non-NULL pQueueFamilyProperties and pQueueFamilyPropertyCount value %" PRIu32 ", but the largest previously returned pQueueFamilyPropertyCount for this physicalDevice is %" PRIu32 ". It is recommended to instead receive all the properties by calling %s with " "pQueueFamilyPropertyCount that was " "previously obtained by calling %s with NULL pQueueFamilyProperties.", caller_name, requested_queue_family_property_count, pd_state->queue_family_known_count, caller_name, caller_name); } return skip; } bool BestPractices::PreCallValidateBindAccelerationStructureMemoryNV( VkDevice device, uint32_t bindInfoCount, const VkBindAccelerationStructureMemoryInfoNV* pBindInfos) const { bool skip = false; for (uint32_t i = 0; i < bindInfoCount; i++) { const ACCELERATION_STRUCTURE_STATE* as_state = GetAccelerationStructureStateNV(pBindInfos[i].accelerationStructure); if (!as_state->memory_requirements_checked) { // There's not an explicit requirement in the spec to call vkGetImageMemoryRequirements() prior to calling // BindAccelerationStructureMemoryNV but it's implied in that memory being bound must conform with // VkAccelerationStructureMemoryRequirementsInfoNV from vkGetAccelerationStructureMemoryRequirementsNV skip |= LogWarning( device, kVUID_BestPractices_BindAccelNV_NoMemReqQuery, "vkBindAccelerationStructureMemoryNV(): " "Binding memory to %s but vkGetAccelerationStructureMemoryRequirementsNV() has not been called on that structure.", report_data->FormatHandle(pBindInfos[i].accelerationStructure).c_str()); } } return skip; } bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties) const { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); const auto* bp_pd_state = GetPhysicalDeviceStateBP(physical_device_state->phys_device); if (pQueueFamilyProperties && bp_pd_state) { return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, bp_pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState, "vkGetPhysicalDeviceQueueFamilyProperties()"); } return false; } bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties) const { const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); const auto* bp_pd_state = GetPhysicalDeviceStateBP(physical_device_state->phys_device); if (pQueueFamilyProperties && bp_pd_state) { return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, bp_pd_state->vkGetPhysicalDeviceQueueFamilyProperties2State, 
"vkGetPhysicalDeviceQueueFamilyProperties2()"); } return false; } bool BestPractices::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR( VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties) const { auto physical_device_state = GetPhysicalDeviceState(physicalDevice); assert(physical_device_state); const auto* bp_pd_state = GetPhysicalDeviceStateBP(physical_device_state->phys_device); if (pQueueFamilyProperties && bp_pd_state) { return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, bp_pd_state->vkGetPhysicalDeviceQueueFamilyProperties2KHRState, "vkGetPhysicalDeviceQueueFamilyProperties2KHR()"); } return false; } bool BestPractices::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats) const { if (!pSurfaceFormats) return false; const auto physical_device_state = GetPhysicalDeviceState(physicalDevice); const auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); const auto& call_state = bp_pd_state->vkGetPhysicalDeviceSurfaceFormatsKHRState; bool skip = false; if (call_state == UNCALLED) { // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the application didn't // previously call this function with a NULL value of pSurfaceFormats: skip |= LogWarning(physicalDevice, kVUID_Core_DevLimit_MustQueryCount, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount; but no prior " "positive value has been seen for pSurfaceFormats."); } else { auto prev_format_count = static_cast<uint32_t>(physical_device_state->surface_formats.size()); if (*pSurfaceFormatCount > prev_format_count) { skip |= LogWarning(physicalDevice, kVUID_Core_DevLimit_CountMismatch, "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormatCount, and with " "pSurfaceFormats set to a value (%u) that is greater than the value (%u) that was returned " "when pSurfaceFormatCount was NULL.", *pSurfaceFormatCount, prev_format_count); } } return skip; } bool BestPractices::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence) const { bool skip = false; for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; bind_idx++) { const VkBindSparseInfo& bind_info = pBindInfo[bind_idx]; // Store sparse binding image_state and after binding is complete make sure that any requiring metadata have it bound layer_data::unordered_set<const IMAGE_STATE*> sparse_images; // Track images getting metadata bound by this call in a set, it'll be recorded into the image_state // in RecordQueueBindSparse. layer_data::unordered_set<const IMAGE_STATE*> sparse_images_with_metadata; // If we're binding sparse image memory make sure reqs were queried and note if metadata is required and bound for (uint32_t i = 0; i < bind_info.imageBindCount; ++i) { const auto& image_bind = bind_info.pImageBinds[i]; auto image_state = GetImageState(image_bind.image); if (!image_state) { continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. 
} sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first skip |= LogWarning(image_state->image(), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image()).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first skip |= LogWarning(image_state->image(), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image()).c_str()); } } for (uint32_t i = 0; i < bind_info.imageOpaqueBindCount; ++i) { const auto& image_opaque_bind = bind_info.pImageOpaqueBinds[i]; auto image_state = GetImageState(bind_info.pImageOpaqueBinds[i].image); if (!image_state) { continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. } sparse_images.insert(image_state); if (image_state->createInfo.flags & VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT) { if (!image_state->get_sparse_reqs_called || image_state->sparse_requirements.empty()) { // For now just warning if sparse image binding occurs without calling to get reqs first skip |= LogWarning(image_state->image(), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling " "vkGetImageSparseMemoryRequirements[2KHR]() to retrieve requirements.", report_data->FormatHandle(image_state->image()).c_str()); } } if (!image_state->memory_requirements_checked) { // For now just warning if sparse image binding occurs without calling to get reqs first skip |= LogWarning(image_state->image(), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding opaque sparse memory to %s without first calling " "vkGetImageMemoryRequirements() to retrieve requirements.", report_data->FormatHandle(image_state->image()).c_str()); } for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) { if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) { sparse_images_with_metadata.insert(image_state); } } } for (const auto& sparse_image_state : sparse_images) { if (sparse_image_state->sparse_metadata_required && !sparse_image_state->sparse_metadata_bound && sparse_images_with_metadata.find(sparse_image_state) == sparse_images_with_metadata.end()) { // Warn if sparse image binding metadata required for image with sparse binding, but metadata not bound skip |= LogWarning(sparse_image_state->image(), kVUID_Core_MemTrack_InvalidState, "vkQueueBindSparse(): Binding sparse memory to %s which requires a metadata aspect but no " "binding with VK_SPARSE_MEMORY_BIND_METADATA_BIT set was made.", report_data->FormatHandle(sparse_image_state->image()).c_str()); } } } return skip; } void BestPractices::ManualPostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence, VkResult result) { if (result != VK_SUCCESS) { return; } for (uint32_t bind_idx = 0; bind_idx < bindInfoCount; bind_idx++) { const VkBindSparseInfo& bind_info = pBindInfo[bind_idx]; for (uint32_t i = 0; i < 
bind_info.imageOpaqueBindCount; ++i) { const auto& image_opaque_bind = bind_info.pImageOpaqueBinds[i]; auto image_state = GetImageState(bind_info.pImageOpaqueBinds[i].image); if (!image_state) { continue; // Param/Object validation should report image_bind.image handles being invalid, so just skip here. } for (uint32_t j = 0; j < image_opaque_bind.bindCount; ++j) { if (image_opaque_bind.pBinds[j].flags & VK_SPARSE_MEMORY_BIND_METADATA_BIT) { image_state->sparse_metadata_bound = true; } } } } } bool BestPractices::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects) const { bool skip = false; const CMD_BUFFER_STATE* cb_node = GetCBState(commandBuffer); if (!cb_node) return skip; // Warn if this is issued prior to Draw Cmd and clearing the entire attachment if (!cb_node->hasDrawCmd && (cb_node->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) && (cb_node->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) { // There are times where app needs to use ClearAttachments (generally when reusing a buffer inside of a render pass) // This warning should be made more specific. It'd be best to avoid triggering this test if it's a use that must call // CmdClearAttachments. skip |= LogPerformanceWarning(commandBuffer, kVUID_BestPractices_DrawState_ClearCmdBeforeDraw, "vkCmdClearAttachments() issued on %s prior to any Draw Cmds. It is recommended you " "use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.", report_data->FormatHandle(commandBuffer).c_str()); } // Check for uses of ClearAttachments along with LOAD_OP_LOAD, // as it can be more efficient to just use LOAD_OP_CLEAR const RENDER_PASS_STATE* rp = cb_node->activeRenderPass.get(); if (rp) { const auto& subpass = rp->createInfo.pSubpasses[cb_node->activeSubpass]; for (uint32_t i = 0; i < attachmentCount; i++) { const auto& attachment = pAttachments[i]; if (attachment.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) { uint32_t color_attachment = attachment.colorAttachment; uint32_t fb_attachment = subpass.pColorAttachments[color_attachment].attachment; if (fb_attachment != VK_ATTACHMENT_UNUSED) { if (rp->createInfo.pAttachments[fb_attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad, "vkCmdClearAttachments() issued on %s for color attachment #%u in this subpass, " "but LOAD_OP_LOAD was used. If you need to clear the framebuffer, always use LOAD_OP_CLEAR as " "it is more efficient.", report_data->FormatHandle(commandBuffer).c_str(), color_attachment); } } } if (subpass.pDepthStencilAttachment && attachment.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) { uint32_t fb_attachment = subpass.pDepthStencilAttachment->attachment; if (fb_attachment != VK_ATTACHMENT_UNUSED) { if (rp->createInfo.pAttachments[fb_attachment].loadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad, "vkCmdClearAttachments() issued on %s for the depth attachment in this subpass, " "but LOAD_OP_LOAD was used. 
If you need to clear the framebuffer, always use LOAD_OP_CLEAR as " "it is more efficient.", report_data->FormatHandle(commandBuffer).c_str()); } } } if (subpass.pDepthStencilAttachment && attachment.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) { uint32_t fb_attachment = subpass.pDepthStencilAttachment->attachment; if (fb_attachment != VK_ATTACHMENT_UNUSED) { if (rp->createInfo.pAttachments[fb_attachment].stencilLoadOp == VK_ATTACHMENT_LOAD_OP_LOAD) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_ClearAttachments_ClearAfterLoad, "vkCmdClearAttachments() issued on %s for the stencil attachment in this subpass, " "but LOAD_OP_LOAD was used. If you need to clear the framebuffer, always use LOAD_OP_CLEAR as " "it is more efficient.", report_data->FormatHandle(commandBuffer).c_str()); } } } } } return skip; } bool BestPractices::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions) const { bool skip = false; skip |= VendorCheckEnabled(kBPVendorArm) && LogPerformanceWarning(device, kVUID_BestPractices_CmdResolveImage_ResolvingImage, "%s Attempting to use vkCmdResolveImage to resolve a multisampled image. " "This is a very slow and extremely bandwidth intensive path. " "You should always resolve multisampled images on-tile with pResolveAttachments in VkRenderPass.", VendorSpecificTag(kBPVendorArm)); return skip; } bool BestPractices::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo) const { bool skip = false; skip |= VendorCheckEnabled(kBPVendorArm) && LogPerformanceWarning(device, kVUID_BestPractices_CmdResolveImage2KHR_ResolvingImage, "%s Attempting to use vkCmdResolveImage2KHR to resolve a multisampled image. " "This is a very slow and extremely bandwidth intensive path. 
" "You should always resolve multisampled images on-tile with pResolveAttachments in VkRenderPass.", VendorSpecificTag(kBPVendorArm)); return skip; } void BestPractices::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* src = GetImageUsageState(srcImage); auto* dst = GetImageUsageState(dstImage); for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdResolveImage()", src, IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_READ, pRegions[i].srcSubresource); QueueValidateImage(funcs, "vkCmdResolveImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_WRITE, pRegions[i].dstSubresource); } } void BestPractices::PreCallRecordCmdResolveImage2KHR(VkCommandBuffer commandBuffer, const VkResolveImageInfo2KHR* pResolveImageInfo) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* src = GetImageUsageState(pResolveImageInfo->srcImage); auto* dst = GetImageUsageState(pResolveImageInfo->dstImage); uint32_t regionCount = pResolveImageInfo->regionCount; for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdResolveImage2KHR()", src, IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_READ, pResolveImageInfo->pRegions[i].srcSubresource); QueueValidateImage(funcs, "vkCmdResolveImage2KHR()", dst, IMAGE_SUBRESOURCE_USAGE_BP::RESOLVE_WRITE, pResolveImageInfo->pRegions[i].dstSubresource); } } void BestPractices::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* dst = GetImageUsageState(image); for (uint32_t i = 0; i < rangeCount; i++) { QueueValidateImage(funcs, "vkCmdClearColorImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::CLEARED, pRanges[i]); } } void BestPractices::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* dst = GetImageUsageState(image); for (uint32_t i = 0; i < rangeCount; i++) { QueueValidateImage(funcs, "vkCmdClearDepthStencilImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::CLEARED, pRanges[i]); } } void BestPractices::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* src = GetImageUsageState(srcImage); auto* dst = GetImageUsageState(dstImage); for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdCopyImage()", src, IMAGE_SUBRESOURCE_USAGE_BP::COPY_READ, pRegions[i].srcSubresource); QueueValidateImage(funcs, "vkCmdCopyImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::COPY_WRITE, pRegions[i].dstSubresource); } } void BestPractices::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions) { 
CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* dst = GetImageUsageState(dstImage); for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdCopyBufferToImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::COPY_WRITE, pRegions[i].imageSubresource); } } void BestPractices::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* src = GetImageUsageState(srcImage); for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdCopyImageToBuffer()", src, IMAGE_SUBRESOURCE_USAGE_BP::COPY_READ, pRegions[i].imageSubresource); } } void BestPractices::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter) { CMD_BUFFER_STATE* cb = GetCBState(commandBuffer); auto &funcs = cb->queue_submit_functions; auto* src = GetImageUsageState(srcImage); auto* dst = GetImageUsageState(dstImage); for (uint32_t i = 0; i < regionCount; i++) { QueueValidateImage(funcs, "vkCmdBlitImage()", src, IMAGE_SUBRESOURCE_USAGE_BP::BLIT_READ, pRegions[i].srcSubresource); QueueValidateImage(funcs, "vkCmdBlitImage()", dst, IMAGE_SUBRESOURCE_USAGE_BP::BLIT_WRITE, pRegions[i].dstSubresource); } } bool BestPractices::PreCallValidateCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler) const { bool skip = false; if (VendorCheckEnabled(kBPVendorArm)) { if ((pCreateInfo->addressModeU != pCreateInfo->addressModeV) || (pCreateInfo->addressModeV != pCreateInfo->addressModeW)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateSampler_DifferentWrappingModes, "%s Creating a sampler object with wrapping modes which do not match (U = %u, V = %u, W = %u). " "This may cause reduced performance even if only U (1D image) or U/V wrapping modes (2D " "image) are actually used. If you need different wrapping modes, disregard this warning.", VendorSpecificTag(kBPVendorArm), pCreateInfo->addressModeU, pCreateInfo->addressModeV, pCreateInfo->addressModeW); } if ((pCreateInfo->minLod != 0.0f) || (pCreateInfo->maxLod < VK_LOD_CLAMP_NONE)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateSampler_LodClamping, "%s Creating a sampler object with LOD clamping (minLod = %f, maxLod = %f). This may cause reduced performance. " "Instead of clamping LOD in the sampler, consider using a VkImageView which restricts the mip-levels, set minLod " "to 0.0, and maxLod to VK_LOD_CLAMP_NONE.", VendorSpecificTag(kBPVendorArm), pCreateInfo->minLod, pCreateInfo->maxLod); } if (pCreateInfo->mipLodBias != 0.0f) { skip |= LogPerformanceWarning(device, kVUID_BestPractices_CreateSampler_LodBias, "%s Creating a sampler object with LOD bias != 0.0 (%f). 
This will lead to less efficient " "descriptors being created and may cause reduced performance.", VendorSpecificTag(kBPVendorArm), pCreateInfo->mipLodBias); } if ((pCreateInfo->addressModeU == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER || pCreateInfo->addressModeV == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER || pCreateInfo->addressModeW == VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER) && (pCreateInfo->borderColor != VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK)) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateSampler_BorderClampColor, "%s Creating a sampler object with border clamping and borderColor != VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK. " "This will lead to less efficient descriptors being created and may cause reduced performance. " "If possible, use VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK as the border color.", VendorSpecificTag(kBPVendorArm)); } if (pCreateInfo->unnormalizedCoordinates) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateSampler_UnnormalizedCoordinates, "%s Creating a sampler object with unnormalized coordinates. This will lead to less efficient " "descriptors being created and may cause reduced performance.", VendorSpecificTag(kBPVendorArm)); } if (pCreateInfo->anisotropyEnable) { skip |= LogPerformanceWarning( device, kVUID_BestPractices_CreateSampler_Anisotropy, "%s Creating a sampler object with anisotropy. This will lead to less efficient descriptors being created " "and may cause reduced performance.", VendorSpecificTag(kBPVendorArm)); } } return skip; } void BestPractices::PostTransformLRUCacheModel::resize(size_t size) { _entries.resize(size); } bool BestPractices::PostTransformLRUCacheModel::query_cache(uint32_t value) { // look for a cache hit auto hit = std::find_if(_entries.begin(), _entries.end(), [value](const CacheEntry& entry) { return entry.value == value; }); if (hit != _entries.end()) { // mark the cache hit as being most recently used hit->age = iteration++; return true; } // if there's no cache hit, we need to model the entry being inserted into the cache CacheEntry new_entry = {value, iteration}; if (iteration < static_cast<uint32_t>(_entries.size())) { // if there is still space left in the cache, use the next available slot *(_entries.begin() + iteration) = new_entry; } else { // otherwise replace the least recently used cache entry auto lru = std::min_element(_entries.begin(), _entries.end(), [](const CacheEntry& a, const CacheEntry& b) { return a.age < b.age; }); *lru = new_entry; } iteration++; return false; } bool BestPractices::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex) const { const auto swapchain_data = GetSwapchainState(swapchain); bool skip = false; if (swapchain_data && swapchain_data->images.size() == 0) { skip |= LogWarning(swapchain, kVUID_Core_DrawState_SwapchainImagesNotFound, "vkAcquireNextImageKHR: No images found to acquire from. 
Application probably did not call " "vkGetSwapchainImagesKHR after swapchain creation."); } return skip; } void BestPractices::CommonPostCallRecordGetPhysicalDeviceQueueFamilyProperties(CALL_STATE& call_state, bool no_pointer) { if (no_pointer) { if (UNCALLED == call_state) { call_state = QUERY_COUNT; } } else { // Save queue family properties call_state = QUERY_DETAILS; } } void BestPractices::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { CommonPostCallRecordGetPhysicalDeviceQueueFamilyProperties(bp_pd_state->vkGetPhysicalDeviceQueueFamilyPropertiesState, nullptr == pQueueFamilyProperties); } } void BestPractices::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { CommonPostCallRecordGetPhysicalDeviceQueueFamilyProperties(bp_pd_state->vkGetPhysicalDeviceQueueFamilyProperties2State, nullptr == pQueueFamilyProperties); } } void BestPractices::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { CommonPostCallRecordGetPhysicalDeviceQueueFamilyProperties(bp_pd_state->vkGetPhysicalDeviceQueueFamilyProperties2KHRState, nullptr == pQueueFamilyProperties); } } void BestPractices::PostCallRecordGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceFeatures(physicalDevice, pFeatures); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { bp_pd_state->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS; } } void BestPractices::PostCallRecordGetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceFeatures2(physicalDevice, pFeatures); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { bp_pd_state->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS; } } void BestPractices::PostCallRecordGetPhysicalDeviceFeatures2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures) { ValidationStateTracker::PostCallRecordGetPhysicalDeviceFeatures2KHR(physicalDevice, pFeatures); auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { bp_pd_state->vkGetPhysicalDeviceFeaturesState = QUERY_DETAILS; } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities, VkResult result) { auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { 
bp_pd_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR( VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities, VkResult result) { auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { bp_pd_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities, VkResult result) { auto* bp_pd_state = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_state) { bp_pd_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS; } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes, VkResult result) { auto* bp_pd_data = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_data) { auto& call_state = bp_pd_data->vkGetPhysicalDeviceSurfacePresentModesKHRState; if (*pPresentModeCount) { if (call_state < QUERY_COUNT) { call_state = QUERY_COUNT; } } if (pPresentModes) { if (call_state < QUERY_DETAILS) { call_state = QUERY_DETAILS; } } } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats, VkResult result) { auto* bp_pd_data = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_data) { auto& call_state = bp_pd_data->vkGetPhysicalDeviceSurfaceFormatsKHRState; if (*pSurfaceFormatCount) { if (call_state < QUERY_COUNT) { call_state = QUERY_COUNT; } } if (pSurfaceFormats) { if (call_state < QUERY_DETAILS) { call_state = QUERY_DETAILS; } } } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats, VkResult result) { auto* bp_pd_data = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_data) { if (*pSurfaceFormatCount) { if (bp_pd_data->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) { bp_pd_data->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT; } } if (pSurfaceFormats) { if (bp_pd_data->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_DETAILS) { bp_pd_data->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_DETAILS; } } } } void BestPractices::ManualPostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties, VkResult result) { auto* bp_pd_data = GetPhysicalDeviceStateBP(physicalDevice); if (bp_pd_data) { if (*pPropertyCount) { if (bp_pd_data->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) { bp_pd_data->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT; } } if (pProperties) { if (bp_pd_data->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_DETAILS) { bp_pd_data->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_DETAILS; } } } } void BestPractices::ManualPostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain, VkResult result) { if (VK_SUCCESS == result) { 
swapchain_bp_state_map.emplace(*pSwapchain, SWAPCHAIN_STATE_BP{}); } } void BestPractices::PostCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator) { ValidationStateTracker::PostCallRecordDestroySwapchainKHR(device, swapchain, pAllocator); auto swapchain_state_itr = swapchain_bp_state_map.find(swapchain); if (swapchain_state_itr != swapchain_bp_state_map.cend()) { swapchain_bp_state_map.erase(swapchain_state_itr); } } void BestPractices::ManualPostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages, VkResult result) { auto swapchain_state_itr = swapchain_bp_state_map.find(swapchain); assert(swapchain_state_itr != swapchain_bp_state_map.cend()); auto& swapchain_state = swapchain_state_itr->second; if (pSwapchainImages || *pSwapchainImageCount) { if (swapchain_state.vkGetSwapchainImagesKHRState < QUERY_DETAILS) { swapchain_state.vkGetSwapchainImagesKHRState = QUERY_DETAILS; } } } void BestPractices::ManualPostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices, VkResult result) { if ((nullptr != pPhysicalDevices) && ((result == VK_SUCCESS || result == VK_INCOMPLETE))) { for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) { phys_device_bp_state_map.emplace(pPhysicalDevices[i], PHYSICAL_DEVICE_STATE_BP{}); } } } void BestPractices::ManualPostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo*, const VkAllocationCallbacks*, VkDevice*, VkResult result) { if (VK_SUCCESS == result) { instance_device_bp_state = &phys_device_bp_state_map[gpu]; } } PHYSICAL_DEVICE_STATE_BP* BestPractices::GetPhysicalDeviceStateBP(const VkPhysicalDevice& phys_device) { if (phys_device_bp_state_map.count(phys_device) > 0) { return &phys_device_bp_state_map.at(phys_device); } else { return nullptr; } } const PHYSICAL_DEVICE_STATE_BP* BestPractices::GetPhysicalDeviceStateBP(const VkPhysicalDevice& phys_device) const { if (phys_device_bp_state_map.count(phys_device) > 0) { return &phys_device_bp_state_map.at(phys_device); } else { return nullptr; } } PHYSICAL_DEVICE_STATE_BP* BestPractices::GetPhysicalDeviceStateBP() { auto bp_state = (reinterpret_cast<BestPractices*>(instance_state))->instance_device_bp_state; if (bp_state) { return bp_state; } else if (!bp_state && phys_device_bp_state_map.count(physical_device_state->phys_device) > 0) { return &phys_device_bp_state_map.at(physical_device_state->phys_device); } else { return nullptr; } } const PHYSICAL_DEVICE_STATE_BP* BestPractices::GetPhysicalDeviceStateBP() const { auto bp_state = (reinterpret_cast<BestPractices*>(instance_state))->instance_device_bp_state; if (bp_state) { return bp_state; } else if (!bp_state && phys_device_bp_state_map.count(physical_device_state->phys_device) > 0) { return &phys_device_bp_state_map.at(physical_device_state->phys_device); } else { return nullptr; } } void BestPractices::PreCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence) { ValidationStateTracker::PreCallRecordQueueSubmit(queue, submitCount, pSubmits, fence); QUEUE_STATE* queue_state = GetQueueState(queue); for (uint32_t submit = 0; submit < submitCount; submit++) { const auto& submit_info = pSubmits[submit]; for (uint32_t cb_index = 0; cb_index < submit_info.commandBufferCount; cb_index++) { CMD_BUFFER_STATE* cb = GetCBState(submit_info.pCommandBuffers[cb_index]); for (auto &func : 
cb->queue_submit_functions) { func(this, queue_state); } } } } void BestPractices::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo) { ValidationStateTracker::PreCallRecordBeginCommandBuffer(commandBuffer, pBeginInfo); // This should not be required, but guards against buggy applications which do not call EndRenderPass correctly. queue_submit_functions_after_render_pass.clear(); }
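
The post-transform LRU cache model above is self-contained enough to lift out and experiment with. The following is a minimal standalone sketch (illustrative names, not part of the layer) that mirrors the same eviction logic and reports the estimated hit rate for a small index stream, the statistic the validator compares against 50%. One deliberate difference: slots are seeded with a 0xFFFFFFFF sentinel so that vertex index 0 cannot falsely match a zero-initialized entry.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <set>
#include <vector>

// Minimal stand-in for the layer's post-transform cache model.
struct CacheEntry {
    uint32_t value;
    uint32_t age;
};

class PostTransformLRUCacheModel {
  public:
    void resize(size_t size) {
        // seed with a sentinel so vertex index 0 cannot falsely hit an empty slot
        entries_.assign(size, CacheEntry{0xFFFFFFFFu, 0});
    }

    // Returns true on a modelled cache hit; on a miss, models the insertion.
    bool query_cache(uint32_t value) {
        auto hit = std::find_if(entries_.begin(), entries_.end(),
                                [value](const CacheEntry& e) { return e.value == value; });
        if (hit != entries_.end()) {
            hit->age = iteration_++;  // refresh recency on a hit
            return true;
        }
        if (iteration_ < static_cast<uint32_t>(entries_.size())) {
            entries_[iteration_] = {value, iteration_};  // fill an unused slot first
        } else {
            // otherwise evict the least recently used entry
            auto lru = std::min_element(entries_.begin(), entries_.end(),
                                        [](const CacheEntry& a, const CacheEntry& b) { return a.age < b.age; });
            *lru = {value, iteration_};
        }
        iteration_++;
        return false;
    }

  private:
    std::vector<CacheEntry> entries_;
    uint32_t iteration_ = 0;
};

int main() {
    // two triangles sharing an edge: indices 1 and 2 should hit on the second triangle
    const std::vector<uint32_t> indices = {0, 1, 2, 2, 1, 3};
    PostTransformLRUCacheModel cache;
    cache.resize(32);
    uint32_t vertex_shade_count = 0;
    for (uint32_t index : indices) {
        if (!cache.query_cache(index)) vertex_shade_count++;
    }
    const std::set<uint32_t> unique_indices(indices.begin(), indices.end());
    // 4 unique vertices, 4 shaded -> estimated hit rate of 100%; a value at or
    // below 50% would trigger the cache-thrashing warning in the validator
    std::printf("vertices shaded: %u, estimated hit rate: %.2f%%\n", vertex_shade_count,
                100.0f * static_cast<float>(unique_indices.size()) / static_cast<float>(vertex_shade_count));
    return 0;
}
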
1
16716
Would it be equivalent to check if `chain != nullptr` below? Not suggesting a change, just curious.
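
For context on the question: in the validation layers, pNext-chain lookups (for example, helpers along the lines of LvlFindInChain<T>) return a typed pointer or nullptr, so testing `chain` and testing `chain != nullptr` are equivalent. A minimal sketch of the pattern, with a hand-rolled chain walk standing in for the real helper and an arbitrary struct chosen purely for illustration:

#include <vulkan/vulkan.h>

// Simplified stand-in for the layers' chain-walking helpers.
static const VkBaseInStructure* find_in_chain(const void* next, VkStructureType wanted) {
    for (auto current = reinterpret_cast<const VkBaseInStructure*>(next); current != nullptr;
         current = current->pNext) {
        if (current->sType == wanted) return current;
    }
    return nullptr;
}

bool has_external_memory_info(const VkImageCreateInfo* create_info) {
    const auto* chain = find_in_chain(create_info->pNext, VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO);
    // `if (chain)` and `if (chain != nullptr)` compile to the same test;
    // the choice between them is purely stylistic.
    return chain != nullptr;
}
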
KhronosGroup-Vulkan-ValidationLayers
cpp
@@ -1131,9 +1131,10 @@ bool nano::wallet::change_sync (nano::account const & source_a, nano::account co void nano::wallet::change_async (nano::account const & source_a, nano::account const & representative_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a) { - wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, shared_from_this (), [source_a, representative_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) { + wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, shared_from_this (), [this, source_a, representative_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) { auto block (wallet_a.change_action (source_a, representative_a, work_a, generate_work_a)); action_a (block); + this->wallets.queue_work_regeneration (std::chrono::steady_clock::now (), block); }); }
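
The essential change in this patch is the added `this` capture: the inserted line reaches the enclosing wallet's `wallets` member from inside the queued lambda, which is only possible through a captured `this`. A standalone sketch of the pattern follows; the queue and block types are simplified stand-ins, not nano's real API.

#include <chrono>
#include <functional>
#include <iostream>
#include <memory>
#include <vector>

struct block {};

// Simplified stand-in for nano::wallets: owns queued actions and work regeneration.
class wallets_stub {
  public:
    void queue_wallet_action(std::function<void()> const& action) { actions_.push_back(action); }
    void queue_work_regeneration(std::chrono::steady_clock::time_point, std::shared_ptr<block>) {
        std::cout << "work regeneration queued\n";
    }
    void run() {
        for (auto& action : actions_) action();
    }

  private:
    std::vector<std::function<void()>> actions_;
};

class wallet_stub {
  public:
    explicit wallet_stub(wallets_stub& wallets_a) : wallets(wallets_a) {}

    void change_async() {
        // Without `this` in the capture list, the member access below would not
        // compile: members are only reachable through a captured `this` pointer.
        wallets.queue_wallet_action([this]() {
            auto blk = std::make_shared<block>();
            this->wallets.queue_work_regeneration(std::chrono::steady_clock::now(), blk);
        });
    }

    wallets_stub& wallets;
};

int main() {
    wallets_stub wallets;
    wallet_stub wallet(wallets);
    wallet.change_async();
    wallets.run();  // prints "work regeneration queued"
    return 0;
}

Capturing a raw `this` in a deferred action is only safe if the object outlives the queue; in the patch that lifetime is covered by the `shared_from_this ()` argument passed alongside the lambda, which keeps the wallet alive until the queued action runs.
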
1
#include <nano/node/wallet.hpp> #include <nano/crypto_lib/random_pool.hpp> #include <nano/lib/utility.hpp> #include <nano/node/node.hpp> #include <nano/node/xorshift.hpp> #include <argon2.h> #include <boost/filesystem.hpp> #include <boost/polymorphic_cast.hpp> #include <future> nano::uint256_union nano::wallet_store::check (nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::check_special)); return value.key; } nano::uint256_union nano::wallet_store::salt (nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::salt_special)); return value.key; } void nano::wallet_store::wallet_key (nano::raw_key & prv_a, nano::transaction const & transaction_a) { std::lock_guard<std::recursive_mutex> lock (mutex); nano::raw_key wallet_l; wallet_key_mem.value (wallet_l); nano::raw_key password_l; password.value (password_l); prv_a.decrypt (wallet_l.data, password_l, salt (transaction_a).owords[0]); } void nano::wallet_store::seed (nano::raw_key & prv_a, nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::seed_special)); nano::raw_key password_l; wallet_key (password_l, transaction_a); prv_a.decrypt (value.key, password_l, salt (transaction_a).owords[seed_iv_index]); } void nano::wallet_store::seed_set (nano::transaction const & transaction_a, nano::raw_key const & prv_a) { nano::raw_key password_l; wallet_key (password_l, transaction_a); nano::uint256_union ciphertext; ciphertext.encrypt (prv_a, password_l, salt (transaction_a).owords[seed_iv_index]); entry_put_raw (transaction_a, nano::wallet_store::seed_special, nano::wallet_value (ciphertext, 0)); deterministic_clear (transaction_a); } nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a) { auto index (deterministic_index_get (transaction_a)); nano::raw_key prv; deterministic_key (prv, transaction_a, index); nano::public_key result (nano::pub_key (prv.data)); while (exists (transaction_a, result)) { ++index; deterministic_key (prv, transaction_a, index); result = nano::pub_key (prv.data); } uint64_t marker (1); marker <<= 32; marker |= index; entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0)); ++index; deterministic_index_set (transaction_a, index); return result; } nano::public_key nano::wallet_store::deterministic_insert (nano::transaction const & transaction_a, uint32_t const index) { nano::raw_key prv; deterministic_key (prv, transaction_a, index); nano::public_key result (nano::pub_key (prv.data)); uint64_t marker (1); marker <<= 32; marker |= index; entry_put_raw (transaction_a, result, nano::wallet_value (nano::uint256_union (marker), 0)); return result; } void nano::wallet_store::deterministic_key (nano::raw_key & prv_a, nano::transaction const & transaction_a, uint32_t index_a) { assert (valid_password (transaction_a)); nano::raw_key seed_l; seed (seed_l, transaction_a); nano::deterministic_key (seed_l.data, index_a, prv_a.data); } uint32_t nano::wallet_store::deterministic_index_get (nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::deterministic_index_special)); return static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1)); } void nano::wallet_store::deterministic_index_set (nano::transaction const & transaction_a, uint32_t index_a) { 
nano::uint256_union index_l (index_a); nano::wallet_value value (index_l, 0); entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, value); } void nano::wallet_store::deterministic_clear (nano::transaction const & transaction_a) { nano::uint256_union key (0); for (auto i (begin (transaction_a)), n (end ()); i != n;) { switch (key_type (nano::wallet_value (i->second))) { case nano::key_type::deterministic: { nano::uint256_union key (i->first); erase (transaction_a, key); i = begin (transaction_a, key); break; } default: { ++i; break; } } } deterministic_index_set (transaction_a, 0); } bool nano::wallet_store::valid_password (nano::transaction const & transaction_a) { nano::raw_key zero; zero.data.clear (); nano::raw_key wallet_key_l; wallet_key (wallet_key_l, transaction_a); nano::uint256_union check_l; check_l.encrypt (zero, wallet_key_l, salt (transaction_a).owords[check_iv_index]); bool ok = check (transaction_a) == check_l; return ok; } bool nano::wallet_store::attempt_password (nano::transaction const & transaction_a, std::string const & password_a) { bool result = false; { std::lock_guard<std::recursive_mutex> lock (mutex); nano::raw_key password_l; derive_key (password_l, transaction_a, password_a); password.value_set (password_l); result = !valid_password (transaction_a); } if (!result) { switch (version (transaction_a)) { case version_1: upgrade_v1_v2 (transaction_a); case version_2: upgrade_v2_v3 (transaction_a); case version_3: upgrade_v3_v4 (transaction_a); case version_4: break; default: assert (false); } } return result; } bool nano::wallet_store::rekey (nano::transaction const & transaction_a, std::string const & password_a) { std::lock_guard<std::recursive_mutex> lock (mutex); bool result (false); if (valid_password (transaction_a)) { nano::raw_key password_new; derive_key (password_new, transaction_a, password_a); nano::raw_key wallet_key_l; wallet_key (wallet_key_l, transaction_a); nano::raw_key password_l; password.value (password_l); password.value_set (password_new); nano::uint256_union encrypted; encrypted.encrypt (wallet_key_l, password_new, salt (transaction_a).owords[0]); nano::raw_key wallet_enc; wallet_enc.data = encrypted; wallet_key_mem.value_set (wallet_enc); entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0)); } else { result = true; } return result; } void nano::wallet_store::derive_key (nano::raw_key & prv_a, nano::transaction const & transaction_a, std::string const & password_a) { auto salt_l (salt (transaction_a)); kdf.phs (prv_a, password_a, salt_l); } nano::fan::fan (nano::uint256_union const & key, size_t count_a) { std::unique_ptr<nano::uint256_union> first (new nano::uint256_union (key)); for (auto i (1); i < count_a; ++i) { std::unique_ptr<nano::uint256_union> entry (new nano::uint256_union); nano::random_pool::generate_block (entry->bytes.data (), entry->bytes.size ()); *first ^= *entry; values.push_back (std::move (entry)); } values.push_back (std::move (first)); } void nano::fan::value (nano::raw_key & prv_a) { std::lock_guard<std::mutex> lock (mutex); value_get (prv_a); } void nano::fan::value_get (nano::raw_key & prv_a) { assert (!mutex.try_lock ()); prv_a.data.clear (); for (auto & i : values) { prv_a.data ^= *i; } } void nano::fan::value_set (nano::raw_key const & value_a) { std::lock_guard<std::mutex> lock (mutex); nano::raw_key value_l; value_get (value_l); *(values[0]) ^= value_l.data; *(values[0]) ^= value_a.data; } // Wallet version number nano::uint256_union 
const nano::wallet_store::version_special (0); // Random number used to salt private key encryption nano::uint256_union const nano::wallet_store::salt_special (1); // Key used to encrypt wallet keys, encrypted itself by the user password nano::uint256_union const nano::wallet_store::wallet_key_special (2); // Check value used to see if password is valid nano::uint256_union const nano::wallet_store::check_special (3); // Representative account to be used if we open a new account nano::uint256_union const nano::wallet_store::representative_special (4); // Wallet seed for deterministic key generation nano::uint256_union const nano::wallet_store::seed_special (5); // Current key index for deterministic keys nano::uint256_union const nano::wallet_store::deterministic_index_special (6); int const nano::wallet_store::special_count (7); size_t const nano::wallet_store::check_iv_index (0); size_t const nano::wallet_store::seed_iv_index (1); nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a, std::string const & json_a) : password (0, fanout_a), wallet_key_mem (0, fanout_a), kdf (kdf_a) { init_a = false; initialize (transaction_a, init_a, wallet_a); if (!init_a) { MDB_val junk; assert (mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) == MDB_NOTFOUND); boost::property_tree::ptree wallet_l; std::stringstream istream (json_a); try { boost::property_tree::read_json (istream, wallet_l); } catch (...) { init_a = true; } for (auto i (wallet_l.begin ()), n (wallet_l.end ()); i != n; ++i) { nano::uint256_union key; init_a = key.decode_hex (i->first); if (!init_a) { nano::uint256_union value; init_a = value.decode_hex (wallet_l.get<std::string> (i->first)); if (!init_a) { entry_put_raw (transaction_a, key, nano::wallet_value (value, 0)); } else { init_a = true; } } else { init_a = true; } } init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &junk) != 0; init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (wallet_key_special), &junk) != 0; init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (salt_special), &junk) != 0; init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (check_special), &junk) != 0; init_a |= mdb_get (tx (transaction_a), handle, nano::mdb_val (representative_special), &junk) != 0; nano::raw_key key; key.data.clear (); password.value_set (key); key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key; wallet_key_mem.value_set (key); } } nano::wallet_store::wallet_store (bool & init_a, nano::kdf & kdf_a, nano::transaction & transaction_a, nano::account representative_a, unsigned fanout_a, std::string const & wallet_a) : password (0, fanout_a), wallet_key_mem (0, fanout_a), kdf (kdf_a) { init_a = false; initialize (transaction_a, init_a, wallet_a); if (!init_a) { int version_status; MDB_val version_value; version_status = mdb_get (tx (transaction_a), handle, nano::mdb_val (version_special), &version_value); if (version_status == MDB_NOTFOUND) { version_put (transaction_a, version_current); nano::uint256_union salt_l; random_pool::generate_block (salt_l.bytes.data (), salt_l.bytes.size ()); entry_put_raw (transaction_a, nano::wallet_store::salt_special, nano::wallet_value (salt_l, 0)); // Wallet key is a fixed random key that encrypts all entries nano::raw_key wallet_key; random_pool::generate_block (wallet_key.data.bytes.data (), sizeof 
(wallet_key.data.bytes)); nano::raw_key password_l; password_l.data.clear (); password.value_set (password_l); nano::raw_key zero; zero.data.clear (); // Wallet key is encrypted by the user's password nano::uint256_union encrypted; encrypted.encrypt (wallet_key, zero, salt_l.owords[0]); entry_put_raw (transaction_a, nano::wallet_store::wallet_key_special, nano::wallet_value (encrypted, 0)); nano::raw_key wallet_key_enc; wallet_key_enc.data = encrypted; wallet_key_mem.value_set (wallet_key_enc); nano::uint256_union check; check.encrypt (zero, wallet_key, salt_l.owords[check_iv_index]); entry_put_raw (transaction_a, nano::wallet_store::check_special, nano::wallet_value (check, 0)); entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0)); nano::raw_key seed; random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ()); seed_set (transaction_a, seed); entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, nano::wallet_value (nano::uint256_union (0), 0)); } } nano::raw_key key; key.data = entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special).key; wallet_key_mem.value_set (key); } std::vector<nano::account> nano::wallet_store::accounts (nano::transaction const & transaction_a) { std::vector<nano::account> result; for (auto i (begin (transaction_a)), n (end ()); i != n; ++i) { nano::account account (i->first); result.push_back (account); } return result; } void nano::wallet_store::initialize (nano::transaction const & transaction_a, bool & init_a, std::string const & path_a) { assert (strlen (path_a.c_str ()) == path_a.size ()); auto error (0); error |= mdb_dbi_open (tx (transaction_a), path_a.c_str (), MDB_CREATE, &handle); init_a = error != 0; } bool nano::wallet_store::is_representative (nano::transaction const & transaction_a) { return exists (transaction_a, representative (transaction_a)); } void nano::wallet_store::representative_set (nano::transaction const & transaction_a, nano::account const & representative_a) { entry_put_raw (transaction_a, nano::wallet_store::representative_special, nano::wallet_value (representative_a, 0)); } nano::account nano::wallet_store::representative (nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::representative_special)); return value.key; } nano::public_key nano::wallet_store::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & prv) { assert (valid_password (transaction_a)); nano::public_key pub (nano::pub_key (prv.data)); nano::raw_key password_l; wallet_key (password_l, transaction_a); nano::uint256_union ciphertext; ciphertext.encrypt (prv, password_l, pub.owords[0].number ()); entry_put_raw (transaction_a, pub, nano::wallet_value (ciphertext, 0)); return pub; } void nano::wallet_store::insert_watch (nano::transaction const & transaction_a, nano::public_key const & pub) { entry_put_raw (transaction_a, pub, nano::wallet_value (nano::uint256_union (0), 0)); } void nano::wallet_store::erase (nano::transaction const & transaction_a, nano::public_key const & pub) { auto status (mdb_del (tx (transaction_a), handle, nano::mdb_val (pub), nullptr)); assert (status == 0); } nano::wallet_value nano::wallet_store::entry_get_raw (nano::transaction const & transaction_a, nano::public_key const & pub_a) { nano::wallet_value result; nano::mdb_val value; auto status (mdb_get (tx (transaction_a), handle, nano::mdb_val (pub_a), value)); if (status == 0) { result 
= nano::wallet_value (value); } else { result.key.clear (); result.work = 0; } return result; } void nano::wallet_store::entry_put_raw (nano::transaction const & transaction_a, nano::public_key const & pub_a, nano::wallet_value const & entry_a) { auto status (mdb_put (tx (transaction_a), handle, nano::mdb_val (pub_a), entry_a.val (), 0)); assert (status == 0); } nano::key_type nano::wallet_store::key_type (nano::wallet_value const & value_a) { auto number (value_a.key.number ()); nano::key_type result; auto text (number.convert_to<std::string> ()); if (number > std::numeric_limits<uint64_t>::max ()) { result = nano::key_type::adhoc; } else { if ((number >> 32).convert_to<uint32_t> () == 1) { result = nano::key_type::deterministic; } else { result = nano::key_type::unknown; } } return result; } bool nano::wallet_store::fetch (nano::transaction const & transaction_a, nano::public_key const & pub, nano::raw_key & prv) { auto result (false); if (valid_password (transaction_a)) { nano::wallet_value value (entry_get_raw (transaction_a, pub)); if (!value.key.is_zero ()) { switch (key_type (value)) { case nano::key_type::deterministic: { nano::raw_key seed_l; seed (seed_l, transaction_a); uint32_t index (static_cast<uint32_t> (value.key.number () & static_cast<uint32_t> (-1))); deterministic_key (prv, transaction_a, index); break; } case nano::key_type::adhoc: { // Ad-hoc keys nano::raw_key password_l; wallet_key (password_l, transaction_a); prv.decrypt (value.key, password_l, pub.owords[0].number ()); break; } default: { result = true; break; } } } else { result = true; } } else { result = true; } if (!result) { nano::public_key compare (nano::pub_key (prv.data)); if (!(pub == compare)) { result = true; } } return result; } bool nano::wallet_store::exists (nano::transaction const & transaction_a, nano::public_key const & pub) { return !pub.is_zero () && find (transaction_a, pub) != end (); } void nano::wallet_store::serialize_json (nano::transaction const & transaction_a, std::string & string_a) { boost::property_tree::ptree tree; for (nano::store_iterator<nano::uint256_union, nano::wallet_value> i (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle)), n (nullptr); i != n; ++i) { tree.put (i->first.to_string (), i->second.key.to_string ()); } std::stringstream ostream; boost::property_tree::write_json (ostream, tree); string_a = ostream.str (); } void nano::wallet_store::write_backup (nano::transaction const & transaction_a, boost::filesystem::path const & path_a) { std::ofstream backup_file; backup_file.open (path_a.string ()); if (!backup_file.fail ()) { // Set permissions to 600 boost::system::error_code ec; nano::set_secure_perm_file (path_a, ec); std::string json; serialize_json (transaction_a, json); backup_file << json; } } bool nano::wallet_store::move (nano::transaction const & transaction_a, nano::wallet_store & other_a, std::vector<nano::public_key> const & keys) { assert (valid_password (transaction_a)); assert (other_a.valid_password (transaction_a)); auto result (false); for (auto i (keys.begin ()), n (keys.end ()); i != n; ++i) { nano::raw_key prv; auto error (other_a.fetch (transaction_a, *i, prv)); result = result | error; if (!result) { insert_adhoc (transaction_a, prv); other_a.erase (transaction_a, *i); } } return result; } bool nano::wallet_store::import (nano::transaction const & transaction_a, nano::wallet_store & other_a) { assert (valid_password (transaction_a)); assert (other_a.valid_password (transaction_a)); auto result 
(false); for (auto i (other_a.begin (transaction_a)), n (end ()); i != n; ++i) { nano::raw_key prv; auto error (other_a.fetch (transaction_a, nano::uint256_union (i->first), prv)); result = result | error; if (!result) { if (!prv.data.is_zero ()) { insert_adhoc (transaction_a, prv); } else { insert_watch (transaction_a, nano::uint256_union (i->first)); } other_a.erase (transaction_a, nano::uint256_union (i->first)); } } return result; } bool nano::wallet_store::work_get (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t & work_a) { auto result (false); auto entry (entry_get_raw (transaction_a, pub_a)); if (!entry.key.is_zero ()) { work_a = entry.work; } else { result = true; } return result; } void nano::wallet_store::work_put (nano::transaction const & transaction_a, nano::public_key const & pub_a, uint64_t work_a) { auto entry (entry_get_raw (transaction_a, pub_a)); assert (!entry.key.is_zero ()); entry.work = work_a; entry_put_raw (transaction_a, pub_a, entry); } unsigned nano::wallet_store::version (nano::transaction const & transaction_a) { nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::version_special)); auto entry (value.key); auto result (static_cast<unsigned> (entry.bytes[31])); return result; } void nano::wallet_store::version_put (nano::transaction const & transaction_a, unsigned version_a) { nano::uint256_union entry (version_a); entry_put_raw (transaction_a, nano::wallet_store::version_special, nano::wallet_value (entry, 0)); } void nano::wallet_store::upgrade_v1_v2 (nano::transaction const & transaction_a) { assert (version (transaction_a) == 1); nano::raw_key zero_password; nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::wallet_key_special)); nano::raw_key kdf; kdf.data.clear (); zero_password.decrypt (value.key, kdf, salt (transaction_a).owords[0]); derive_key (kdf, transaction_a, ""); nano::raw_key empty_password; empty_password.decrypt (value.key, kdf, salt (transaction_a).owords[0]); for (auto i (begin (transaction_a)), n (end ()); i != n; ++i) { nano::public_key key (i->first); nano::raw_key prv; if (fetch (transaction_a, key, prv)) { // Key failed to decrypt despite valid password nano::wallet_value data (entry_get_raw (transaction_a, key)); prv.decrypt (data.key, zero_password, salt (transaction_a).owords[0]); nano::public_key compare (nano::pub_key (prv.data)); if (compare == key) { // If we successfully decrypted it, rewrite the key back with the correct wallet key insert_adhoc (transaction_a, prv); } else { // Also try the empty password nano::wallet_value data (entry_get_raw (transaction_a, key)); prv.decrypt (data.key, empty_password, salt (transaction_a).owords[0]); nano::public_key compare (nano::pub_key (prv.data)); if (compare == key) { // If we successfully decrypted it, rewrite the key back with the correct wallet key insert_adhoc (transaction_a, prv); } } } } version_put (transaction_a, 2); } void nano::wallet_store::upgrade_v2_v3 (nano::transaction const & transaction_a) { assert (version (transaction_a) == 2); nano::raw_key seed; random_pool::generate_block (seed.data.bytes.data (), seed.data.bytes.size ()); seed_set (transaction_a, seed); entry_put_raw (transaction_a, nano::wallet_store::deterministic_index_special, nano::wallet_value (nano::uint256_union (0), 0)); version_put (transaction_a, 3); } void nano::wallet_store::upgrade_v3_v4 (nano::transaction const & transaction_a) { assert (version (transaction_a) == 3); version_put (transaction_a, 4); assert 
(valid_password (transaction_a)); nano::raw_key seed; nano::wallet_value value (entry_get_raw (transaction_a, nano::wallet_store::seed_special)); nano::raw_key password_l; wallet_key (password_l, transaction_a); seed.decrypt (value.key, password_l, salt (transaction_a).owords[0]); nano::uint256_union ciphertext; ciphertext.encrypt (seed, password_l, salt (transaction_a).owords[seed_iv_index]); entry_put_raw (transaction_a, nano::wallet_store::seed_special, nano::wallet_value (ciphertext, 0)); for (auto i (begin (transaction_a)), n (end ()); i != n; ++i) { nano::wallet_value value (i->second); if (!value.key.is_zero ()) { switch (key_type (i->second)) { case nano::key_type::adhoc: { nano::raw_key key; if (fetch (transaction_a, nano::public_key (i->first), key)) { // Key failed to decrypt despite valid password key.decrypt (value.key, password_l, salt (transaction_a).owords[0]); nano::uint256_union new_key_ciphertext; new_key_ciphertext.encrypt (key, password_l, (nano::uint256_union (i->first)).owords[0].number ()); nano::wallet_value new_value (new_key_ciphertext, value.work); erase (transaction_a, nano::public_key (i->first)); entry_put_raw (transaction_a, nano::public_key (i->first), new_value); } } case nano::key_type::deterministic: break; default: assert (false); } } } } void nano::kdf::phs (nano::raw_key & result_a, std::string const & password_a, nano::uint256_union const & salt_a) { static nano::network_params network_params; std::lock_guard<std::mutex> lock (mutex); auto success (argon2_hash (1, network_params.kdf_work, 1, password_a.data (), password_a.size (), salt_a.bytes.data (), salt_a.bytes.size (), result_a.data.bytes.data (), result_a.data.bytes.size (), NULL, 0, Argon2_d, 0x10)); assert (success == 0); (void)success; } nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a) : lock_observer ([](bool, bool) {}), store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a), wallets (wallets_a) { } nano::wallet::wallet (bool & init_a, nano::transaction & transaction_a, nano::wallets & wallets_a, std::string const & wallet_a, std::string const & json) : lock_observer ([](bool, bool) {}), store (init_a, wallets_a.kdf, transaction_a, wallets_a.node.config.random_representative (), wallets_a.node.config.password_fanout, wallet_a, json), wallets (wallets_a) { } void nano::wallet::enter_initial_password () { nano::raw_key password_l; { std::lock_guard<std::recursive_mutex> lock (store.mutex); store.password.value (password_l); } if (password_l.data.is_zero ()) { auto transaction (wallets.tx_begin_write ()); if (store.valid_password (transaction)) { // Newly created wallets have a zero key store.rekey (transaction, ""); } else { enter_password (transaction, ""); } } } bool nano::wallet::enter_password (nano::transaction const & transaction_a, std::string const & password_a) { auto result (store.attempt_password (transaction_a, password_a)); if (!result) { auto this_l (shared_from_this ()); wallets.node.background ([this_l]() { this_l->search_pending (); }); wallets.node.logger.try_log ("Wallet unlocked"); } else { wallets.node.logger.try_log ("Invalid password, wallet locked"); } lock_observer (result, password_a.empty ()); return result; } nano::public_key nano::wallet::deterministic_insert (nano::transaction const & transaction_a, bool generate_work_a) { nano::public_key key (0); if (store.valid_password (transaction_a)) { key = 
store.deterministic_insert (transaction_a); if (generate_work_a) { work_ensure (key, key); } auto block_transaction (wallets.node.store.tx_begin_read ()); if (wallets.node.ledger.weight (block_transaction, key) >= wallets.node.config.vote_minimum.number ()) { std::lock_guard<std::mutex> lock (representatives_mutex); representatives.insert (key); ++wallets.reps_count; } } return key; } nano::public_key nano::wallet::deterministic_insert (uint32_t const index, bool generate_work_a) { auto transaction (wallets.tx_begin_write ()); nano::public_key key (0); if (store.valid_password (transaction)) { key = store.deterministic_insert (transaction, index); if (generate_work_a) { work_ensure (key, key); } } return key; } nano::public_key nano::wallet::deterministic_insert (bool generate_work_a) { auto transaction (wallets.tx_begin_write ()); auto result (deterministic_insert (transaction, generate_work_a)); return result; } nano::public_key nano::wallet::insert_adhoc (nano::transaction const & transaction_a, nano::raw_key const & key_a, bool generate_work_a) { nano::public_key key (0); if (store.valid_password (transaction_a)) { key = store.insert_adhoc (transaction_a, key_a); auto block_transaction (wallets.node.store.tx_begin_read ()); if (generate_work_a) { work_ensure (key, wallets.node.ledger.latest_root (block_transaction, key)); } if (wallets.node.ledger.weight (block_transaction, key) >= wallets.node.config.vote_minimum.number ()) { std::lock_guard<std::mutex> lock (representatives_mutex); representatives.insert (key); ++wallets.reps_count; } } return key; } nano::public_key nano::wallet::insert_adhoc (nano::raw_key const & account_a, bool generate_work_a) { auto transaction (wallets.tx_begin_write ()); auto result (insert_adhoc (transaction, account_a, generate_work_a)); return result; } void nano::wallet::insert_watch (nano::transaction const & transaction_a, nano::public_key const & pub_a) { store.insert_watch (transaction_a, pub_a); } bool nano::wallet::exists (nano::public_key const & account_a) { auto transaction (wallets.tx_begin_read ()); return store.exists (transaction, account_a); } bool nano::wallet::import (std::string const & json_a, std::string const & password_a) { auto error (false); std::unique_ptr<nano::wallet_store> temp; { auto transaction (wallets.tx_begin_write ()); nano::uint256_union id; random_pool::generate_block (id.bytes.data (), id.bytes.size ()); temp.reset (new nano::wallet_store (error, wallets.node.wallets.kdf, transaction, 0, 1, id.to_string (), json_a)); } if (!error) { auto transaction (wallets.tx_begin_write ()); error = temp->attempt_password (transaction, password_a); } auto transaction (wallets.tx_begin_write ()); if (!error) { error = store.import (transaction, *temp); } temp->destroy (transaction); return error; } void nano::wallet::serialize (std::string & json_a) { auto transaction (wallets.tx_begin_read ()); store.serialize_json (transaction, json_a); } void nano::wallet_store::destroy (nano::transaction const & transaction_a) { auto status (mdb_drop (tx (transaction_a), handle, 1)); assert (status == 0); handle = 0; } std::shared_ptr<nano::block> nano::wallet::receive_action (nano::block const & send_a, nano::account const & representative_a, nano::uint128_union const & amount_a, uint64_t work_a, bool generate_work_a) { nano::account account; auto hash (send_a.hash ()); std::shared_ptr<nano::block> block; if (wallets.node.config.receive_minimum.number () <= amount_a.number ()) { auto block_transaction (wallets.node.ledger.store.tx_begin_read 
()); auto transaction (wallets.tx_begin_read ()); nano::pending_info pending_info; if (wallets.node.store.block_exists (block_transaction, hash)) { account = wallets.node.ledger.block_destination (block_transaction, send_a); if (!wallets.node.ledger.store.pending_get (block_transaction, nano::pending_key (account, hash), pending_info)) { nano::raw_key prv; if (!store.fetch (transaction, account, prv)) { if (work_a == 0) { store.work_get (transaction, account, work_a); } nano::account_info info; auto new_account (wallets.node.ledger.store.account_get (block_transaction, account, info)); if (!new_account) { std::shared_ptr<nano::block> rep_block = wallets.node.ledger.store.block_get (block_transaction, info.rep_block); assert (rep_block != nullptr); block.reset (new nano::state_block (account, info.head, rep_block->representative (), info.balance.number () + pending_info.amount.number (), hash, prv, account, work_a)); } else { block.reset (new nano::state_block (account, 0, representative_a, pending_info.amount, hash, prv, account, work_a)); } } else { wallets.node.logger.try_log ("Unable to receive, wallet locked"); } } else { // Ledger doesn't have this marked as available to receive anymore } } else { // Ledger doesn't have this block anymore. } } else { wallets.node.logger.try_log (boost::str (boost::format ("Not receiving block %1% due to minimum receive threshold") % hash.to_string ())); // Someone sent us something below the threshold of receiving } if (block != nullptr) { if (nano::work_validate (*block)) { wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % account.to_account ())); wallets.node.work_generate_blocking (*block); } wallets.node.process_active (block); wallets.node.block_processor.flush (); if (generate_work_a) { work_ensure (account, block->hash ()); } } return block; } std::shared_ptr<nano::block> nano::wallet::change_action (nano::account const & source_a, nano::account const & representative_a, uint64_t work_a, bool generate_work_a) { std::shared_ptr<nano::block> block; { auto transaction (wallets.tx_begin_read ()); auto block_transaction (wallets.node.store.tx_begin ()); if (store.valid_password (transaction)) { auto existing (store.find (transaction, source_a)); if (existing != store.end () && !wallets.node.ledger.latest (block_transaction, source_a).is_zero ()) { nano::account_info info; auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info)); assert (!error1); nano::raw_key prv; auto error2 (store.fetch (transaction, source_a, prv)); assert (!error2); if (work_a == 0) { store.work_get (transaction, source_a, work_a); } block.reset (new nano::state_block (source_a, info.head, representative_a, info.balance, 0, prv, source_a, work_a)); } } } if (block != nullptr) { if (nano::work_validate (*block)) { wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % source_a.to_account ())); wallets.node.work_generate_blocking (*block); } wallets.node.process_active (block); wallets.node.block_processor.flush (); if (generate_work_a) { work_ensure (source_a, block->hash ()); } } return block; } std::shared_ptr<nano::block> nano::wallet::send_action (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a) { 
std::shared_ptr<nano::block> block; boost::optional<nano::mdb_val> id_mdb_val; if (id_a) { id_mdb_val = nano::mdb_val (id_a->size (), const_cast<char *> (id_a->data ())); } bool error = false; bool cached_block = false; { auto transaction (wallets.tx_begin ((bool)id_mdb_val)); auto block_transaction (wallets.node.store.tx_begin_read ()); if (id_mdb_val) { nano::mdb_val result; auto status (mdb_get (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, result)); if (status == 0) { nano::uint256_union hash (result); block = wallets.node.store.block_get (block_transaction, hash); if (block != nullptr) { cached_block = true; wallets.node.network.flood_block (block); } } else if (status != MDB_NOTFOUND) { error = true; } } if (!error && block == nullptr) { if (store.valid_password (transaction)) { auto existing (store.find (transaction, source_a)); if (existing != store.end ()) { auto balance (wallets.node.ledger.account_balance (block_transaction, source_a)); if (!balance.is_zero () && balance >= amount_a) { nano::account_info info; auto error1 (wallets.node.ledger.store.account_get (block_transaction, source_a, info)); assert (!error1); nano::raw_key prv; auto error2 (store.fetch (transaction, source_a, prv)); assert (!error2); std::shared_ptr<nano::block> rep_block = wallets.node.ledger.store.block_get (block_transaction, info.rep_block); assert (rep_block != nullptr); if (work_a == 0) { store.work_get (transaction, source_a, work_a); } block.reset (new nano::state_block (source_a, info.head, rep_block->representative (), balance - amount_a, account_a, prv, source_a, work_a)); if (id_mdb_val && block != nullptr) { auto status (mdb_put (wallets.env.tx (transaction), wallets.node.wallets.send_action_ids, *id_mdb_val, nano::mdb_val (block->hash ()), 0)); if (status != 0) { block = nullptr; error = true; } } } } } } } if (!error && block != nullptr && !cached_block) { if (nano::work_validate (*block)) { wallets.node.logger.try_log (boost::str (boost::format ("Cached or provided work for block %1% account %2% is invalid, regenerating") % block->hash ().to_string () % account_a.to_account ())); wallets.node.work_generate_blocking (*block); } wallets.node.process_active (block); wallets.node.block_processor.flush (); if (generate_work_a) { work_ensure (source_a, block->hash ()); } } return block; } bool nano::wallet::change_sync (nano::account const & source_a, nano::account const & representative_a) { std::promise<bool> result; std::future<bool> future = result.get_future (); // clang-format off change_async (source_a, representative_a, [&result](std::shared_ptr<nano::block> block_a) { result.set_value (block_a == nullptr); }, true); // clang-format on return future.get (); } void nano::wallet::change_async (nano::account const & source_a, nano::account const & representative_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a) { wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, shared_from_this (), [source_a, representative_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) { auto block (wallet_a.change_action (source_a, representative_a, work_a, generate_work_a)); action_a (block); }); } bool nano::wallet::receive_sync (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a) { std::promise<bool> result; std::future<bool> future = result.get_future (); // clang-format off receive_async (block_a, representative_a, amount_a, 
[&result](std::shared_ptr<nano::block> block_a) { result.set_value (block_a == nullptr); }, true); // clang-format on return future.get (); } void nano::wallet::receive_async (std::shared_ptr<nano::block> block_a, nano::account const & representative_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a) { wallets.node.wallets.queue_wallet_action (amount_a, shared_from_this (), [block_a, representative_a, amount_a, action_a, work_a, generate_work_a](nano::wallet & wallet_a) { auto block (wallet_a.receive_action (*block_a, representative_a, amount_a, work_a, generate_work_a)); action_a (block); }); } nano::block_hash nano::wallet::send_sync (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a) { std::promise<nano::block_hash> result; std::future<nano::block_hash> future = result.get_future (); // clang-format off send_async (source_a, account_a, amount_a, [&result](std::shared_ptr<nano::block> block_a) { result.set_value (block_a->hash ()); }, true); // clang-format on return future.get (); } void nano::wallet::send_async (nano::account const & source_a, nano::account const & account_a, nano::uint128_t const & amount_a, std::function<void(std::shared_ptr<nano::block>)> const & action_a, uint64_t work_a, bool generate_work_a, boost::optional<std::string> id_a) { wallets.node.wallets.queue_wallet_action (nano::wallets::high_priority, shared_from_this (), [source_a, account_a, amount_a, action_a, work_a, generate_work_a, id_a](nano::wallet & wallet_a) { auto block (wallet_a.send_action (source_a, account_a, amount_a, work_a, generate_work_a, id_a)); action_a (block); }); } // Update work for account if latest root is root_a void nano::wallet::work_update (nano::transaction const & transaction_a, nano::account const & account_a, nano::block_hash const & root_a, uint64_t work_a) { assert (!nano::work_validate (root_a, work_a)); assert (store.exists (transaction_a, account_a)); auto block_transaction (wallets.node.store.tx_begin_read ()); auto latest (wallets.node.ledger.latest_root (block_transaction, account_a)); if (latest == root_a) { store.work_put (transaction_a, account_a, work_a); } else { wallets.node.logger.try_log ("Cached work no longer valid, discarding"); } } void nano::wallet::work_ensure (nano::account const & account_a, nano::block_hash const & hash_a) { wallets.node.wallets.queue_wallet_action (nano::wallets::generate_priority, shared_from_this (), [account_a, hash_a](nano::wallet & wallet_a) { wallet_a.work_cache_blocking (account_a, hash_a); }); } bool nano::wallet::search_pending () { auto transaction (wallets.tx_begin_read ()); auto result (!store.valid_password (transaction)); if (!result) { wallets.node.logger.try_log ("Beginning pending block search"); for (auto i (store.begin (transaction)), n (store.end ()); i != n; ++i) { auto block_transaction (wallets.node.store.tx_begin_read ()); nano::account account (i->first); // Don't search pending for watch-only accounts if (!nano::wallet_value (i->second).key.is_zero ()) { for (auto j (wallets.node.store.pending_begin (block_transaction, nano::pending_key (account, 0))); nano::pending_key (j->first).account == account; ++j) { nano::pending_key key (j->first); auto hash (key.hash); nano::pending_info pending (j->second); auto amount (pending.amount.number ()); if (wallets.node.config.receive_minimum.number () <= amount) { wallets.node.logger.try_log (boost::str (boost::format ("Found a pending 
block %1% for account %2%") % hash.to_string () % pending.source.to_account ())); auto block (wallets.node.store.block_get (block_transaction, hash)); if (wallets.node.ledger.block_confirmed (block_transaction, hash)) { // Receive confirmed block auto node_l (wallets.node.shared ()); wallets.node.background ([node_l, block, hash]() { auto transaction (node_l->store.tx_begin_read ()); node_l->receive_confirmed (transaction, block, hash); }); } else { // Request confirmation for unconfirmed block wallets.node.block_confirm (block); } } } } } wallets.node.logger.try_log ("Pending block search phase complete"); } else { wallets.node.logger.try_log ("Stopping search, wallet is locked"); } return result; } void nano::wallet::init_free_accounts (nano::transaction const & transaction_a) { free_accounts.clear (); for (auto i (store.begin (transaction_a)), n (store.end ()); i != n; ++i) { free_accounts.insert (nano::uint256_union (i->first)); } } uint32_t nano::wallet::deterministic_check (nano::transaction const & transaction_a, uint32_t index) { auto block_transaction (wallets.node.store.tx_begin_read ()); for (uint32_t i (index + 1), n (index + 64); i < n; ++i) { nano::raw_key prv; store.deterministic_key (prv, transaction_a, i); nano::keypair pair (prv.data.to_string ()); // Check if account received at least 1 block auto latest (wallets.node.ledger.latest (block_transaction, pair.pub)); if (!latest.is_zero ()) { index = i; // i + 64 - Check additional 64 accounts // i/64 - Check additional accounts for large wallets. I.e. 64000/64 = 1000 accounts to check n = i + 64 + (i / 64); } else { // Check if there are pending blocks for account for (auto ii (wallets.node.store.pending_begin (block_transaction, nano::pending_key (pair.pub, 0))); nano::pending_key (ii->first).account == pair.pub; ++ii) { index = i; n = i + 64 + (i / 64); break; } } } return index; } nano::public_key nano::wallet::change_seed (nano::transaction const & transaction_a, nano::raw_key const & prv_a, uint32_t count) { store.seed_set (transaction_a, prv_a); auto account = deterministic_insert (transaction_a); if (count == 0) { count = deterministic_check (transaction_a, 0); } for (uint32_t i (0); i < count; ++i) { // Disable work generation to prevent weak CPU nodes stuck account = deterministic_insert (transaction_a, false); } return account; } void nano::wallet::deterministic_restore (nano::transaction const & transaction_a) { auto index (store.deterministic_index_get (transaction_a)); auto new_index (deterministic_check (transaction_a, index)); for (uint32_t i (index); i <= new_index && index != new_index; ++i) { // Disable work generation to prevent weak CPU nodes stuck deterministic_insert (transaction_a, false); } } bool nano::wallet::live () { return store.handle != 0; } void nano::wallet::work_cache_blocking (nano::account const & account_a, nano::block_hash const & root_a) { auto begin (std::chrono::steady_clock::now ()); auto work (wallets.node.work_generate_blocking (root_a)); if (wallets.node.config.logging.work_generation_time ()) { /* * The difficulty parameter is the second parameter for `work_generate_blocking()`, * currently we don't supply one so we must fetch the default value. 
*/ auto difficulty (wallets.node.network_params.network.publish_threshold); wallets.node.logger.try_log ("Work generation for ", root_a.to_string (), ", with a difficulty of ", difficulty, " complete: ", (std::chrono::duration_cast<std::chrono::microseconds> (std::chrono::steady_clock::now () - begin).count ()), " us"); } auto transaction (wallets.tx_begin_write ()); if (live () && store.exists (transaction, account_a)) { work_update (transaction, account_a, root_a, work); } } nano::wallets::wallets (bool & error_a, nano::node & node_a) : observer ([](bool) {}), node (node_a), env (boost::polymorphic_downcast<nano::mdb_wallets_store *> (node_a.wallets_store_impl.get ())->environment), stopped (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::wallet_actions); do_wallet_actions (); }) { std::unique_lock<std::mutex> lock (mutex); if (!error_a) { auto transaction (tx_begin_write ()); auto status (mdb_dbi_open (env.tx (transaction), nullptr, MDB_CREATE, &handle)); split_if_needed (transaction, node.store); status |= mdb_dbi_open (env.tx (transaction), "send_action_ids", MDB_CREATE, &send_action_ids); assert (status == 0); std::string beginning (nano::uint256_union (0).to_string ()); std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ()); nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ())))); nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ())))); for (; i != n; ++i) { nano::uint256_union id; std::string text (i->first.data (), i->first.size ()); auto error (id.decode_hex (text)); assert (!error); assert (items.find (id) == items.end ()); auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text)); if (!error) { items[id] = wallet; } else { // Couldn't open wallet } } } for (auto & item : items) { item.second->enter_initial_password (); } if (node_a.config.enable_voting) { lock.unlock (); ongoing_compute_reps (); } } nano::wallets::~wallets () { stop (); } std::shared_ptr<nano::wallet> nano::wallets::open (nano::uint256_union const & id_a) { std::lock_guard<std::mutex> lock (mutex); std::shared_ptr<nano::wallet> result; auto existing (items.find (id_a)); if (existing != items.end ()) { result = existing->second; } return result; } std::shared_ptr<nano::wallet> nano::wallets::create (nano::uint256_union const & id_a) { std::lock_guard<std::mutex> lock (mutex); assert (items.find (id_a) == items.end ()); std::shared_ptr<nano::wallet> result; bool error; { auto transaction (tx_begin_write ()); result = std::make_shared<nano::wallet> (error, transaction, *this, id_a.to_string ()); } if (!error) { items[id_a] = result; result->enter_initial_password (); } return result; } bool nano::wallets::search_pending (nano::uint256_union const & wallet_a) { std::lock_guard<std::mutex> lock (mutex); auto result (false); auto existing (items.find (wallet_a)); result = existing == items.end (); if (!result) { auto wallet (existing->second); result = wallet->search_pending (); } return result; } void nano::wallets::search_pending_all () { std::lock_guard<std::mutex> lock (mutex); for (auto i : items) { i.second->search_pending (); } } void nano::wallets::destroy (nano::uint256_union 
const & id_a) { std::lock_guard<std::mutex> lock (mutex); auto transaction (tx_begin_write ()); // action_mutex should be after transactions to prevent deadlocks in deterministic_insert () & insert_adhoc () std::lock_guard<std::mutex> action_lock (action_mutex); auto existing (items.find (id_a)); assert (existing != items.end ()); auto wallet (existing->second); items.erase (existing); wallet->store.destroy (transaction); } void nano::wallets::reload () { std::lock_guard<std::mutex> lock (mutex); auto transaction (tx_begin_write ()); std::unordered_set<nano::uint256_union> stored_items; std::string beginning (nano::uint256_union (0).to_string ()); std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ()); nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ())))); nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ())))); for (; i != n; ++i) { nano::uint256_union id; std::string text (i->first.data (), i->first.size ()); auto error (id.decode_hex (text)); assert (!error); // New wallet if (items.find (id) == items.end ()) { auto wallet (std::make_shared<nano::wallet> (error, transaction, *this, text)); if (!error) { items[id] = wallet; } } // List of wallets on disk stored_items.insert (id); } // Delete non existing wallets from memory std::vector<nano::uint256_union> deleted_items; for (auto i : items) { if (stored_items.find (i.first) == stored_items.end ()) { deleted_items.push_back (i.first); } } for (auto & i : deleted_items) { assert (items.find (i) == items.end ()); items.erase (i); } } void nano::wallets::do_wallet_actions () { std::unique_lock<std::mutex> action_lock (action_mutex); while (!stopped) { if (!actions.empty ()) { auto first (actions.begin ()); auto wallet (first->second.first); auto current (std::move (first->second.second)); actions.erase (first); if (wallet->live ()) { action_lock.unlock (); observer (true); current (*wallet); observer (false); action_lock.lock (); } } else { condition.wait (action_lock); } } } void nano::wallets::queue_wallet_action (nano::uint128_t const & amount_a, std::shared_ptr<nano::wallet> wallet_a, std::function<void(nano::wallet &)> const & action_a) { { std::lock_guard<std::mutex> action_lock (action_mutex); actions.insert (std::make_pair (amount_a, std::make_pair (wallet_a, std::move (action_a)))); } condition.notify_all (); } void nano::wallets::foreach_representative (nano::transaction const & transaction_a, std::function<void(nano::public_key const & pub_a, nano::raw_key const & prv_a)> const & action_a) { if (node.config.enable_voting) { std::lock_guard<std::mutex> lock (mutex); auto transaction_l (tx_begin_read ()); for (auto i (items.begin ()), n (items.end ()); i != n; ++i) { auto & wallet (*i->second); std::lock_guard<std::recursive_mutex> store_lock (wallet.store.mutex); std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex); for (auto ii (wallet.representatives.begin ()), nn (wallet.representatives.end ()); ii != nn; ++ii) { nano::account account (*ii); if (wallet.store.exists (transaction_l, account)) { if (!node.ledger.weight (transaction_a, account).is_zero ()) { if (wallet.store.valid_password (transaction_l)) { 
nano::raw_key prv; auto error (wallet.store.fetch (transaction_l, account, prv)); assert (!error); action_a (account, prv); } else { static auto last_log = std::chrono::steady_clock::time_point (); if (last_log < std::chrono::steady_clock::now () - std::chrono::seconds (60)) { last_log = std::chrono::steady_clock::now (); node.logger.always_log (boost::str (boost::format ("Representative locked inside wallet %1%") % i->first.to_string ())); } } } } } } } } bool nano::wallets::exists (nano::transaction const & transaction_a, nano::public_key const & account_a) { std::lock_guard<std::mutex> lock (mutex); auto result (false); for (auto i (items.begin ()), n (items.end ()); !result && i != n; ++i) { result = i->second->store.exists (transaction_a, account_a); } return result; } void nano::wallets::stop () { { std::lock_guard<std::mutex> action_lock (action_mutex); stopped = true; actions.clear (); } condition.notify_all (); if (thread.joinable ()) { thread.join (); } } nano::transaction nano::wallets::tx_begin_write () { return tx_begin (true); } nano::transaction nano::wallets::tx_begin_read () { return tx_begin (false); } nano::transaction nano::wallets::tx_begin (bool write_a) { return env.tx_begin (write_a); } void nano::wallets::clear_send_ids (nano::transaction const & transaction_a) { auto status (mdb_drop (env.tx (transaction_a), send_action_ids, 0)); assert (status == 0); } void nano::wallets::compute_reps () { std::lock_guard<std::mutex> lock (mutex); reps_count = 0; auto ledger_transaction (node.store.tx_begin_read ()); auto transaction (tx_begin_read ()); for (auto i (items.begin ()), n (items.end ()); i != n; ++i) { auto & wallet (*i->second); decltype (wallet.representatives) representatives_l; for (auto ii (wallet.store.begin (transaction)), nn (wallet.store.end ()); ii != nn; ++ii) { auto account (ii->first); if (node.ledger.weight (ledger_transaction, account) >= node.config.vote_minimum.number ()) { representatives_l.insert (account); ++reps_count; } } std::lock_guard<std::mutex> representatives_lock (wallet.representatives_mutex); wallet.representatives.swap (representatives_l); } } void nano::wallets::ongoing_compute_reps () { compute_reps (); auto & node_l (node); auto compute_delay (network_params.network.is_test_network () ? 
std::chrono::milliseconds (10) : std::chrono::milliseconds (15 * 60 * 1000)); // Representation drifts quickly on the test network but very slowly on the live network node.alarm.add (std::chrono::steady_clock::now () + compute_delay, [&node_l]() { node_l.wallets.ongoing_compute_reps (); }); } void nano::wallets::split_if_needed (nano::transaction & transaction_destination, nano::block_store & store_a) { auto store_l (dynamic_cast<nano::mdb_store *> (&store_a)); if (store_l != nullptr) { auto transaction_source (store_l->tx_begin_write ()); MDB_txn * tx_source (*boost::polymorphic_downcast<nano::mdb_txn *> (transaction_source.impl.get ())); if (items.empty ()) { MDB_txn * tx_destination (*boost::polymorphic_downcast<nano::mdb_txn *> (transaction_destination.impl.get ())); std::string beginning (nano::uint256_union (0).to_string ()); std::string end ((nano::uint256_union (nano::uint256_t (0) - nano::uint256_t (1))).to_string ()); nano::store_iterator<std::array<char, 64>, nano::no_value> i (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction_source, handle, nano::mdb_val (beginning.size (), const_cast<char *> (beginning.c_str ())))); nano::store_iterator<std::array<char, 64>, nano::no_value> n (std::make_unique<nano::mdb_iterator<std::array<char, 64>, nano::no_value>> (transaction_source, handle, nano::mdb_val (end.size (), const_cast<char *> (end.c_str ())))); for (; i != n; ++i) { nano::uint256_union id; std::string text (i->first.data (), i->first.size ()); auto error1 (id.decode_hex (text)); assert (!error1); assert (strlen (text.c_str ()) == text.size ()); move_table (text, tx_source, tx_destination); } } } } void nano::wallets::move_table (std::string const & name_a, MDB_txn * tx_source, MDB_txn * tx_destination) { MDB_dbi handle_source; auto error2 (mdb_dbi_open (tx_source, name_a.c_str (), MDB_CREATE, &handle_source)); assert (!error2); MDB_dbi handle_destination; auto error3 (mdb_dbi_open (tx_destination, name_a.c_str (), MDB_CREATE, &handle_destination)); assert (!error3); MDB_cursor * cursor; auto error4 (mdb_cursor_open (tx_source, handle_source, &cursor)); assert (!error4); MDB_val val_key; MDB_val val_value; auto cursor_status (mdb_cursor_get (cursor, &val_key, &val_value, MDB_FIRST)); while (cursor_status == MDB_SUCCESS) { auto error5 (mdb_put (tx_destination, handle_destination, &val_key, &val_value, 0)); assert (!error5); cursor_status = mdb_cursor_get (cursor, &val_key, &val_value, MDB_NEXT); } auto error6 (mdb_drop (tx_source, handle_source, 1)); assert (!error6); } nano::uint128_t const nano::wallets::generate_priority = std::numeric_limits<nano::uint128_t>::max (); nano::uint128_t const nano::wallets::high_priority = std::numeric_limits<nano::uint128_t>::max () - 1; nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a) { nano::store_iterator<nano::uint256_union, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (nano::uint256_union (special_count)))); return result; } nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::begin (nano::transaction const & transaction_a, nano::uint256_union const & key) { nano::store_iterator<nano::uint256_union, nano::wallet_value> result (std::make_unique<nano::mdb_iterator<nano::uint256_union, nano::wallet_value>> (transaction_a, handle, nano::mdb_val (key))); return result; } 
nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::find (nano::transaction const & transaction_a, nano::uint256_union const & key) { auto result (begin (transaction_a, key)); nano::store_iterator<nano::uint256_union, nano::wallet_value> end (nullptr); if (result != end) { if (nano::uint256_union (result->first) == key) { return result; } else { return end; } } else { return end; } return result; } nano::store_iterator<nano::uint256_union, nano::wallet_value> nano::wallet_store::end () { return nano::store_iterator<nano::uint256_union, nano::wallet_value> (nullptr); } nano::mdb_wallets_store::mdb_wallets_store (bool & error_a, boost::filesystem::path const & path_a, int lmdb_max_dbs) : environment (error_a, path_a, lmdb_max_dbs, 1ULL * 1024 * 1024 * 1024) { } MDB_txn * nano::wallet_store::tx (nano::transaction const & transaction_a) const { auto result (boost::polymorphic_downcast<nano::mdb_txn *> (transaction_a.impl.get ())); return *result; } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (wallets & wallets, const std::string & name) { size_t items_count = 0; size_t actions_count = 0; { std::lock_guard<std::mutex> guard (wallets.mutex); items_count = wallets.items.size (); actions_count = wallets.actions.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); auto sizeof_item_element = sizeof (decltype (wallets.items)::value_type); auto sizeof_actions_element = sizeof (decltype (wallets.actions)::value_type); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "items", items_count, sizeof_item_element })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "actions_count", actions_count, sizeof_actions_element })); return composite; } }
1
15,337
Should probably do `auto this_l (shared_from_this ());` and pass/use that instead of `this`. Same in a few other places. In other words, replace both `shared_from_this ()` and `this` with `this_l`.
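For illustration, a minimal sketch of the pattern the comment suggests (the function name `example_async` is hypothetical; the real call sites are spread through this file, and `enter_password` already follows this pattern):

void nano::wallet::example_async ()
{
	// Capture the shared_ptr once...
	auto this_l (shared_from_this ());
	wallets.node.background ([this_l]() {
		// ...and use this_l-> instead of this->, so the wallet is
		// guaranteed to stay alive for the callback's duration.
		this_l->search_pending ();
	});
}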
nanocurrency-nano-node
cpp
@@ -55,7 +55,8 @@ module.exports = iterateJsdoc( ) { context.report( { data: { name: jsdocNode.name }, - message: `The first word in a function's description should be a third-person verb (eg "runs" not "run").`, + message: + 'The first word in a function\'s description should be a third-person verb (eg "runs" not "run").', node: jsdocNode, } ); }
1
/** * ESLint rules: verb form. * * Site Kit by Google, Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * External dependencies */ const { default: iterateJsdoc, } = require( 'eslint-plugin-jsdoc/dist/iterateJsdoc' ); /** * Internal dependencies */ const { isDependencyBlock, isFunction } = require( '../utils' ); module.exports = iterateJsdoc( ( { context, jsdoc, jsdocNode, node } ) => { if ( isDependencyBlock( jsdoc ) ) { return; } if ( jsdoc.description && jsdoc.description.match( /Site Kit by Google, Copyright/gm ) ) { return; } // Only apply this rule to code that documents a function; constants don't need third-party // rules and would in fact be made awkward by this rule. // See: https://github.com/google/site-kit-wp/pull/2047#discussion_r498509940. if ( ! isFunction( node ) ) { return; } // Verbs can include dashes or in some cases also parentheses. if ( jsdoc.description && ! jsdoc.description.match( /^[\w\(\)\-]+s\W.*/g ) ) { context.report( { data: { name: jsdocNode.name }, message: `The first word in a function's description should be a third-person verb (eg "runs" not "run").`, node: jsdocNode, } ); } }, { iterateAllJsdocs: true, meta: { docs: { description: `Requires that all functions' first word end with "s" (assuming that it is a third-person verb).`, }, fixable: 'code', type: 'suggestion', }, } );
1
42,326
And here. Please use `'` for apostrophes in changed strings in this file.
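For illustration, a sketch of what the comment asks for, assuming the same `context.report` call from the diff: a typographic apostrophe (`'`) inside the single-quoted string, so no backslash escaping is needed:

context.report( {
	data: { name: jsdocNode.name },
	// Typographic apostrophe avoids the `\'` escape inside
	// a single-quoted string.
	message:
		'The first word in a function's description should be a third-person verb (eg "runs" not "run").',
	node: jsdocNode,
} );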
google-site-kit-wp
js
@@ -100,7 +100,7 @@ func (gs *GasStation) EstimateGasForAction(actPb *iotextypes.Action) (uint64, er if err != nil { return 0, err } - _, receipt, err := gs.bc.ExecuteContractRead(callerAddr, sc) + _, receipt, err := gs.bc.SimulateExecution(callerAddr, sc) if err != nil { return 0, err }
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package gasstation import ( "math/big" "sort" "github.com/iotexproject/iotex-address/address" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/blockchain" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-proto/golang/iotextypes" ) // GasStation provide gas related api type GasStation struct { bc blockchain.Blockchain cfg config.API } // NewGasStation creates a new gas station func NewGasStation(bc blockchain.Blockchain, cfg config.API) *GasStation { return &GasStation{ bc: bc, cfg: cfg, } } //IsSystemAction determine whether input action belongs to system action func (gs *GasStation) IsSystemAction(act action.SealedEnvelope) bool { switch act.Action().(type) { case *action.GrantReward: return true case *action.PutPollResult: return true default: return false } } // SuggestGasPrice suggest gas price func (gs *GasStation) SuggestGasPrice() (uint64, error) { var smallestPrices []*big.Int tip := gs.bc.TipHeight() endBlockHeight := uint64(0) if tip > uint64(gs.cfg.GasStation.SuggestBlockWindow) { endBlockHeight = tip - uint64(gs.cfg.GasStation.SuggestBlockWindow) } for height := tip; height > endBlockHeight; height-- { blk, err := gs.bc.GetBlockByHeight(height) if err != nil { return gs.cfg.GasStation.DefaultGas, err } if len(blk.Actions) == 0 { continue } if len(blk.Actions) == 1 && gs.IsSystemAction(blk.Actions[0]) { continue } smallestPrice := blk.Actions[0].GasPrice() for _, act := range blk.Actions { if gs.IsSystemAction(act) { continue } if smallestPrice.Cmp(act.GasPrice()) == 1 { smallestPrice = act.GasPrice() } } smallestPrices = append(smallestPrices, smallestPrice) } if len(smallestPrices) == 0 { // return default price return gs.cfg.GasStation.DefaultGas, nil } sort.Sort(bigIntArray(smallestPrices)) gasPrice := smallestPrices[(len(smallestPrices)-1)*gs.cfg.GasStation.Percentile/100].Uint64() if gasPrice < gs.cfg.GasStation.DefaultGas { gasPrice = gs.cfg.GasStation.DefaultGas } return gasPrice, nil } // EstimateGasForAction estimate gas for action func (gs *GasStation) EstimateGasForAction(actPb *iotextypes.Action) (uint64, error) { var selp action.SealedEnvelope if err := selp.LoadProto(actPb); err != nil { return 0, err } // Special handling for executions if sc, ok := selp.Action().(*action.Execution); ok { callerAddr, err := address.FromBytes(selp.SrcPubkey().Hash()) if err != nil { return 0, err } _, receipt, err := gs.bc.ExecuteContractRead(callerAddr, sc) if err != nil { return 0, err } return receipt.GasConsumed, nil } gas, err := selp.IntrinsicGas() if err != nil { return 0, err } return gas, nil } type bigIntArray []*big.Int func (s bigIntArray) Len() int { return len(s) } func (s bigIntArray) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } func (s bigIntArray) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
1
19,514
assignments should only be cuddled with other assignments (from `wsl`)
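For illustration, a sketch of how this `wsl` warning is typically resolved in the diff's context: a blank line separates the assignment from the preceding `if` block, so the assignment is no longer cuddled with a non-assignment statement:

callerAddr, err := address.FromBytes(selp.SrcPubkey().Hash())
if err != nil {
	return 0, err
}

// The blank line above satisfies wsl: the assignment no longer
// directly follows the closing brace of the if statement.
_, receipt, err := gs.bc.SimulateExecution(callerAddr, sc)
if err != nil {
	return 0, err
}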
iotexproject-iotex-core
go
@@ -504,7 +504,15 @@ func (c *ConfigLocal) MetadataVersion() MetadataVer { // DataVersion implements the Config interface for ConfigLocal. func (c *ConfigLocal) DataVersion() DataVer { - return 1 + return FilesWithHolesDataVer +} + +// DefaultNewBlockDataVersion returns the default data version for new blocks. +func DefaultNewBlockDataVersion(c Config, holes bool) DataVer { + if holes { + return FilesWithHolesDataVer + } + return FirstValidDataVer } // DoBackgroundFlushes implements the Config interface for ConfigLocal.
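A hypothetical call site for the helper added by this patch (the names `config` and `fileHasHoles` are illustrative, not from the source):

// Pick the data version for a newly written block: files with
// holes need FilesWithHolesDataVer; everything else can stay at
// FirstValidDataVer.
dver := DefaultNewBlockDataVersion(config, fileHasHoles)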
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "sync" "time" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/logger" keybase1 "github.com/keybase/client/go/protocol" metrics "github.com/rcrowley/go-metrics" "golang.org/x/net/context" ) const ( // Max supported plaintext size of a file in KBFS. TODO: increase // this once we support multiple levels of indirection. maxFileBytesDefault = 2 * 1024 * 1024 * 1024 // Max supported size of a directory entry name. maxNameBytesDefault = 255 // Maximum supported plaintext size of a directory in KBFS. TODO: // increase this once we support levels of indirection for // directories. maxDirBytesDefault = 512 * 1024 // Default time after setting the rekey bit before prompting for a // paper key. rekeyWithPromptWaitTimeDefault = 10 * time.Minute // How often do we check for stuff to reclaim? qrPeriodDefault = 1 * time.Minute // How long must something be unreferenced before we reclaim it? qrUnrefAgeDefault = 1 * time.Minute // tlfValidDurationDefault is the default for tlf validity before redoing identify. tlfValidDurationDefault = 6 * time.Hour ) // ConfigLocal implements the Config interface using purely local // server objects (no KBFS operations used RPCs). type ConfigLocal struct { lock sync.RWMutex kbfs KBFSOps keyman KeyManager rep Reporter kcache KeyCache bcache BlockCache codec Codec mdops MDOps kops KeyOps crypto Crypto mdcache MDCache bops BlockOps mdserv MDServer bserv BlockServer keyserv KeyServer daemon KeybaseDaemon bsplit BlockSplitter notifier Notifier clock Clock kbpki KBPKI renamer ConflictRenamer registry metrics.Registry loggerFn func(prefix string) logger.Logger noBGFlush bool // logic opposite so the default value is the common setting rwpWaitTime time.Duration sharingBeforeSignupEnabled bool maxFileBytes uint64 maxNameBytes uint32 maxDirBytes uint64 rekeyQueue RekeyQueue qrPeriod time.Duration qrUnrefAge time.Duration // allKnownConfigsForTesting is used for testing, and contains all created // Config objects in this test. allKnownConfigsForTesting *[]Config // tlfValidDuration is the time TLFs are valid before redoing identification. tlfValidDuration time.Duration } var _ Config = (*ConfigLocal)(nil) // LocalUser represents a fake KBFS user, useful for testing. type LocalUser struct { UserInfo Asserts []string // Index into UserInfo.CryptPublicKeys. CurrentCryptPublicKeyIndex int // Index into UserInfo.VerifyingKeys. CurrentVerifyingKeyIndex int } // GetCurrentCryptPublicKey returns this LocalUser's public encryption key. func (lu *LocalUser) GetCurrentCryptPublicKey() CryptPublicKey { return lu.CryptPublicKeys[lu.CurrentCryptPublicKeyIndex] } // GetCurrentVerifyingKey returns this LocalUser's public signing key. func (lu *LocalUser) GetCurrentVerifyingKey() VerifyingKey { return lu.VerifyingKeys[lu.CurrentVerifyingKeyIndex] } func verifyingKeysToPublicKeys(keys []VerifyingKey) []keybase1.PublicKey { publicKeys := make([]keybase1.PublicKey, len(keys)) for i, key := range keys { publicKeys[i] = keybase1.PublicKey{ KID: key.kid, IsSibkey: true, } } return publicKeys } func cryptPublicKeysToPublicKeys(keys []CryptPublicKey) []keybase1.PublicKey { publicKeys := make([]keybase1.PublicKey, len(keys)) for i, key := range keys { publicKeys[i] = keybase1.PublicKey{ KID: key.kid, IsSibkey: false, } } return publicKeys } // GetPublicKeys returns all of this LocalUser's public encryption keys. 
func (lu *LocalUser) GetPublicKeys() []keybase1.PublicKey { sibkeys := verifyingKeysToPublicKeys(lu.VerifyingKeys) subkeys := cryptPublicKeysToPublicKeys(lu.CryptPublicKeys) return append(sibkeys, subkeys...) } // Helper functions to get a various keys for a local user suitable // for use with CryptoLocal. Each function will return the same key // will always be returned for a given user. // MakeLocalUserSigningKeyOrBust returns a unique signing key for this user. func MakeLocalUserSigningKeyOrBust(name libkb.NormalizedUsername) SigningKey { return MakeFakeSigningKeyOrBust(string(name) + " signing key") } // MakeLocalUserVerifyingKeyOrBust makes a new verifying key // corresponding to the signing key for this user. func MakeLocalUserVerifyingKeyOrBust(name libkb.NormalizedUsername) VerifyingKey { return MakeLocalUserSigningKeyOrBust(name).GetVerifyingKey() } // MakeLocalUserCryptPrivateKeyOrBust returns a unique private // encryption key for this user. func MakeLocalUserCryptPrivateKeyOrBust(name libkb.NormalizedUsername) CryptPrivateKey { return MakeFakeCryptPrivateKeyOrBust(string(name) + " crypt key") } // MakeLocalUserCryptPublicKeyOrBust returns the public key // corresponding to the crypt private key for this user. func MakeLocalUserCryptPublicKeyOrBust(name libkb.NormalizedUsername) CryptPublicKey { return MakeLocalUserCryptPrivateKeyOrBust(name).getPublicKey() } // MakeLocalUsers is a helper function to generate a list of // LocalUsers suitable to use with KBPKILocal. func MakeLocalUsers(users []libkb.NormalizedUsername) []LocalUser { localUsers := make([]LocalUser, len(users)) for i := 0; i < len(users); i++ { verifyingKey := MakeLocalUserVerifyingKeyOrBust(users[i]) cryptPublicKey := MakeLocalUserCryptPublicKeyOrBust(users[i]) localUsers[i] = LocalUser{ UserInfo: UserInfo{ Name: users[i], UID: keybase1.MakeTestUID(uint32(i + 1)), VerifyingKeys: []VerifyingKey{verifyingKey}, CryptPublicKeys: []CryptPublicKey{cryptPublicKey}, KIDNames: map[keybase1.KID]string{ verifyingKey.KID(): "dev1", }, }, CurrentCryptPublicKeyIndex: 0, CurrentVerifyingKeyIndex: 0, } } return localUsers } // NewConfigLocal constructs a new ConfigLocal with default components. func NewConfigLocal() *ConfigLocal { config := &ConfigLocal{} config.SetClock(wallClock{}) config.SetReporter(NewReporterSimple(config.Clock(), 10)) config.SetConflictRenamer(WriterDeviceDateConflictRenamer{config}) config.ResetCaches() config.SetCodec(NewCodecMsgpack()) config.SetBlockOps(&BlockOpsStandard{config}) config.SetKeyOps(&KeyOpsStandard{config}) config.SetRekeyQueue(NewRekeyQueueStandard(config)) config.maxFileBytes = maxFileBytesDefault config.maxNameBytes = maxNameBytesDefault config.maxDirBytes = maxDirBytesDefault config.rwpWaitTime = rekeyWithPromptWaitTimeDefault config.qrPeriod = qrPeriodDefault config.qrUnrefAge = qrUnrefAgeDefault // Don't bother creating the registry if UseNilMetrics is set. if !metrics.UseNilMetrics { registry := metrics.NewRegistry() config.SetMetricsRegistry(registry) } config.tlfValidDuration = tlfValidDurationDefault return config } // KBFSOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) KBFSOps() KBFSOps { c.lock.RLock() defer c.lock.RUnlock() return c.kbfs } // SetKBFSOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKBFSOps(k KBFSOps) { c.lock.Lock() defer c.lock.Unlock() c.kbfs = k } // KBPKI implements the Config interface for ConfigLocal. 
func (c *ConfigLocal) KBPKI() KBPKI { c.lock.RLock() defer c.lock.RUnlock() return c.kbpki } // SetKBPKI implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKBPKI(k KBPKI) { c.lock.Lock() defer c.lock.Unlock() c.kbpki = k } // KeyManager implements the Config interface for ConfigLocal. func (c *ConfigLocal) KeyManager() KeyManager { c.lock.RLock() defer c.lock.RUnlock() return c.keyman } // SetKeyManager implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKeyManager(k KeyManager) { c.lock.Lock() defer c.lock.Unlock() c.keyman = k } // Reporter implements the Config interface for ConfigLocal. func (c *ConfigLocal) Reporter() Reporter { c.lock.RLock() defer c.lock.RUnlock() return c.rep } // SetReporter implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetReporter(r Reporter) { c.lock.Lock() defer c.lock.Unlock() c.rep = r } // KeyCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) KeyCache() KeyCache { c.lock.RLock() defer c.lock.RUnlock() return c.kcache } // SetKeyCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKeyCache(k KeyCache) { c.lock.Lock() defer c.lock.Unlock() c.kcache = k } // BlockCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) BlockCache() BlockCache { c.lock.RLock() defer c.lock.RUnlock() return c.bcache } // SetBlockCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetBlockCache(b BlockCache) { c.lock.Lock() defer c.lock.Unlock() c.bcache = b } // Crypto implements the Config interface for ConfigLocal. func (c *ConfigLocal) Crypto() Crypto { c.lock.RLock() defer c.lock.RUnlock() return c.crypto } // SetCrypto implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetCrypto(cr Crypto) { c.lock.Lock() defer c.lock.Unlock() c.crypto = cr } // Codec implements the Config interface for ConfigLocal. func (c *ConfigLocal) Codec() Codec { c.lock.RLock() defer c.lock.RUnlock() return c.codec } // SetCodec implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetCodec(co Codec) { c.lock.Lock() defer c.lock.Unlock() c.codec = co RegisterOps(c.codec) } // MDOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) MDOps() MDOps { c.lock.RLock() defer c.lock.RUnlock() return c.mdops } // SetMDOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetMDOps(m MDOps) { c.lock.Lock() defer c.lock.Unlock() c.mdops = m } // KeyOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) KeyOps() KeyOps { c.lock.RLock() defer c.lock.RUnlock() return c.kops } // SetKeyOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKeyOps(k KeyOps) { c.lock.Lock() defer c.lock.Unlock() c.kops = k } // MDCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) MDCache() MDCache { c.lock.RLock() defer c.lock.RUnlock() return c.mdcache } // SetMDCache implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetMDCache(m MDCache) { c.lock.Lock() defer c.lock.Unlock() c.mdcache = m } // BlockOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) BlockOps() BlockOps { c.lock.RLock() defer c.lock.RUnlock() return c.bops } // SetBlockOps implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetBlockOps(b BlockOps) { c.lock.Lock() defer c.lock.Unlock() c.bops = b } // MDServer implements the Config interface for ConfigLocal. 
func (c *ConfigLocal) MDServer() MDServer { c.lock.RLock() defer c.lock.RUnlock() return c.mdserv } // SetMDServer implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetMDServer(m MDServer) { c.lock.Lock() defer c.lock.Unlock() c.mdserv = m } // BlockServer implements the Config interface for ConfigLocal. func (c *ConfigLocal) BlockServer() BlockServer { c.lock.RLock() defer c.lock.RUnlock() return c.bserv } // SetBlockServer implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetBlockServer(b BlockServer) { c.lock.Lock() defer c.lock.Unlock() c.bserv = b } // KeyServer implements the Config interface for ConfigLocal. func (c *ConfigLocal) KeyServer() KeyServer { c.lock.RLock() defer c.lock.RUnlock() return c.keyserv } // SetKeyServer implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKeyServer(k KeyServer) { c.lock.Lock() defer c.lock.Unlock() c.keyserv = k } // KeybaseDaemon implements the Config interface for ConfigLocal. func (c *ConfigLocal) KeybaseDaemon() KeybaseDaemon { c.lock.RLock() defer c.lock.RUnlock() return c.daemon } // SetKeybaseDaemon implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetKeybaseDaemon(k KeybaseDaemon) { c.lock.Lock() defer c.lock.Unlock() c.daemon = k } // BlockSplitter implements the Config interface for ConfigLocal. func (c *ConfigLocal) BlockSplitter() BlockSplitter { c.lock.RLock() defer c.lock.RUnlock() return c.bsplit } // SetBlockSplitter implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetBlockSplitter(b BlockSplitter) { c.lock.Lock() defer c.lock.Unlock() c.bsplit = b } // Notifier implements the Config interface for ConfigLocal. func (c *ConfigLocal) Notifier() Notifier { c.lock.RLock() defer c.lock.RUnlock() return c.notifier } // SetNotifier implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetNotifier(n Notifier) { c.lock.Lock() defer c.lock.Unlock() c.notifier = n } // Clock implements the Config interface for ConfigLocal. func (c *ConfigLocal) Clock() Clock { c.lock.RLock() defer c.lock.RUnlock() return c.clock } // SetClock implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetClock(cl Clock) { c.lock.Lock() defer c.lock.Unlock() c.clock = cl } // ConflictRenamer implements the Config interface for ConfigLocal. func (c *ConfigLocal) ConflictRenamer() ConflictRenamer { c.lock.RLock() defer c.lock.RUnlock() return c.renamer } // SetConflictRenamer implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetConflictRenamer(cr ConflictRenamer) { c.lock.Lock() defer c.lock.Unlock() c.renamer = cr } // MetadataVersion implements the Config interface for ConfigLocal. func (c *ConfigLocal) MetadataVersion() MetadataVer { return InitialExtraMetadataVer } // DataVersion implements the Config interface for ConfigLocal. func (c *ConfigLocal) DataVersion() DataVer { return 1 } // DoBackgroundFlushes implements the Config interface for ConfigLocal. func (c *ConfigLocal) DoBackgroundFlushes() bool { return !c.noBGFlush } // RekeyWithPromptWaitTime implements the Config interface for // ConfigLocal. func (c *ConfigLocal) RekeyWithPromptWaitTime() time.Duration { return c.rwpWaitTime } // SharingBeforeSignupEnabled returns whether or not this client will // handle sharing before signup. 
func (c *ConfigLocal) SharingBeforeSignupEnabled() bool { c.lock.RLock() defer c.lock.RUnlock() return c.sharingBeforeSignupEnabled } // SetSharingBeforeSignupEnabled sets whether or not this client will // handle sharing before signup. func (c *ConfigLocal) SetSharingBeforeSignupEnabled(sharingBeforeSignupEnabled bool) { c.lock.Lock() defer c.lock.Unlock() c.sharingBeforeSignupEnabled = sharingBeforeSignupEnabled } // QuotaReclamationPeriod implements the Config interface for ConfigLocal. func (c *ConfigLocal) QuotaReclamationPeriod() time.Duration { return c.qrPeriod } // QuotaReclamationMinUnrefAge implements the Config interface for ConfigLocal. func (c *ConfigLocal) QuotaReclamationMinUnrefAge() time.Duration { return c.qrUnrefAge } // ReqsBufSize implements the Config interface for ConfigLocal. func (c *ConfigLocal) ReqsBufSize() int { return 20 } // MaxFileBytes implements the Config interface for ConfigLocal. func (c *ConfigLocal) MaxFileBytes() uint64 { return c.maxFileBytes } // MaxNameBytes implements the Config interface for ConfigLocal. func (c *ConfigLocal) MaxNameBytes() uint32 { return c.maxNameBytes } // MaxDirBytes implements the Config interface for ConfigLocal. func (c *ConfigLocal) MaxDirBytes() uint64 { return c.maxDirBytes } // ResetCaches implements the Config interface for ConfigLocal. func (c *ConfigLocal) ResetCaches() { c.lock.Lock() defer c.lock.Unlock() c.mdcache = NewMDCacheStandard(5000) c.kcache = NewKeyCacheStandard(5000) // Limit the block cache to 10K entries or 512 MB of bytes c.bcache = NewBlockCacheStandard(c, 10000, 512*1024*1024) } // MakeLogger implements the Config interface for ConfigLocal. func (c *ConfigLocal) MakeLogger(module string) logger.Logger { c.lock.RLock() defer c.lock.RUnlock() return c.loggerFn(module) } // SetLoggerMaker implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetLoggerMaker( loggerFn func(module string) logger.Logger) { c.lock.Lock() defer c.lock.Unlock() c.loggerFn = loggerFn } // NewConfigLocalWithCryptoForSigning initializes a local crypto // config w/a crypto interface, using the given signing key, that can // be used for non-PKI crypto. func NewConfigLocalWithCryptoForSigning(signingKey SigningKey) *ConfigLocal { config := NewConfigLocal() config.SetLoggerMaker(func(m string) logger.Logger { return logger.NewNull() }) cryptPrivateKey := MakeLocalUserCryptPrivateKeyOrBust("nobody") crypto := NewCryptoLocal(config, signingKey, cryptPrivateKey) config.SetCrypto(crypto) return config } // NewConfigLocalWithCrypto initializes a local crypto config w/a crypto interface that can be used for non-PKI crypto. func NewConfigLocalWithCrypto() *ConfigLocal { signingKey := MakeLocalUserSigningKeyOrBust("nobody") return NewConfigLocalWithCryptoForSigning(signingKey) } // MetricsRegistry implements the Config interface for ConfigLocal. func (c *ConfigLocal) MetricsRegistry() metrics.Registry { return c.registry } // SetRekeyQueue implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetRekeyQueue(r RekeyQueue) { c.rekeyQueue = r } // RekeyQueue implements the Config interface for ConfigLocal. func (c *ConfigLocal) RekeyQueue() RekeyQueue { return c.rekeyQueue } // SetMetricsRegistry implements the Config interface for ConfigLocal. func (c *ConfigLocal) SetMetricsRegistry(r metrics.Registry) { c.registry = r } // SetTLFValidDuration implements the Config interface for ConfigLocal. 
func (c *ConfigLocal) SetTLFValidDuration(r time.Duration) { c.tlfValidDuration = r } // TLFValidDuration implements the Config interface for ConfigLocal. func (c *ConfigLocal) TLFValidDuration() time.Duration { return c.tlfValidDuration } // Shutdown implements the Config interface for ConfigLocal. func (c *ConfigLocal) Shutdown() error { c.RekeyQueue().Clear() c.RekeyQueue().Wait(context.Background()) if c.CheckStateOnShutdown() { // Before we do anything, wait for all archiving to finish. for _, config := range *c.allKnownConfigsForTesting { kbfsOps, ok := config.KBFSOps().(*KBFSOpsStandard) if !ok { continue } for _, fbo := range kbfsOps.ops { err := fbo.fbm.waitForArchives(context.Background()) if err != nil { return err } } } } err := c.KBFSOps().Shutdown() // Continue with shutdown regardless of err. c.MDServer().Shutdown() c.KeyServer().Shutdown() c.KeybaseDaemon().Shutdown() c.BlockServer().Shutdown() c.Crypto().Shutdown() c.Reporter().Shutdown() return err } // CheckStateOnShutdown implements the Config interface for ConfigLocal. func (c *ConfigLocal) CheckStateOnShutdown() bool { if md, ok := c.MDServer().(*MDServerLocal); ok { return !md.isShutdown() } return false }
1
11,309
Please move this to `block_types.go`.
keybase-kbfs
go
@@ -40,9 +40,6 @@ namespace NLog.Layouts /// JSON attribute. /// </summary> [NLogConfigurationItem] - [ThreadAgnostic] - [ThreadSafe] - [AppDomainFixedOutput] public class JsonAttribute { /// <summary>
1
// // Copyright (c) 2004-2020 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * Neither the name of Jaroslaw Kowalski nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // namespace NLog.Layouts { using System.ComponentModel; using NLog.Config; /// <summary> /// JSON attribute. /// </summary> [NLogConfigurationItem] [ThreadAgnostic] [ThreadSafe] [AppDomainFixedOutput] public class JsonAttribute { /// <summary> /// Initializes a new instance of the <see cref="JsonAttribute" /> class. /// </summary> public JsonAttribute() : this(null, null, true) { } /// <summary> /// Initializes a new instance of the <see cref="JsonAttribute" /> class. /// </summary> /// <param name="name">The name of the attribute.</param> /// <param name="layout">The layout of the attribute's value.</param> public JsonAttribute(string name, Layout layout): this(name, layout, true) { } /// <summary> /// Initializes a new instance of the <see cref="JsonAttribute" /> class. /// </summary> /// <param name="name">The name of the attribute.</param> /// <param name="layout">The layout of the attribute's value.</param> /// <param name="encode">Encode value with json-encode</param> public JsonAttribute(string name, Layout layout, bool encode) { Name = name; Layout = layout; Encode = encode; IncludeEmptyValue = false; } /// <summary> /// Gets or sets the name of the attribute. /// </summary> /// <docgen category='JSON Attribute Options' order='10' /> [RequiredParameter] public string Name { get; set; } /// <summary> /// Gets or sets the layout that will be rendered as the attribute's value. /// </summary> /// <docgen category='JSON Attribute Options' order='10' /> [RequiredParameter] public Layout Layout { get => LayoutWrapper.Inner; set => LayoutWrapper.Inner = value; } /// <summary> /// Determines whether or not this attribute will be Json encoded. 
/// </summary> /// <docgen category='JSON Attribute Options' order='100' /> public bool Encode { get => LayoutWrapper.JsonEncode; set => LayoutWrapper.JsonEncode = value; } /// <summary> /// Gets or sets a value indicating whether to escape non-ascii characters /// </summary> /// <docgen category='JSON Attribute Options' order='100' /> public bool EscapeUnicode { get => LayoutWrapper.EscapeUnicode; set => LayoutWrapper.EscapeUnicode = value; } /// <summary> /// Should forward slashes be escaped? If true, / will be converted to \/ /// </summary> /// <docgen category='JSON Attribute Options' order='100' /> [DefaultValue(true)] // TODO NLog 5 change to nullable (with default fallback to false) public bool EscapeForwardSlash { get => LayoutWrapper.EscapeForwardSlash; set => LayoutWrapper.EscapeForwardSlash = value; } /// <summary> /// Gets or sets whether an attribute with empty value should be included in the output /// </summary> /// <docgen category='JSON Attribute Options' order='100' /> public bool IncludeEmptyValue { get; set; } internal readonly LayoutRenderers.Wrappers.JsonEncodeLayoutRendererWrapper LayoutWrapper = new LayoutRenderers.Wrappers.JsonEncodeLayoutRendererWrapper(); } }
1
21,267
So this is not needed anymore for all attributes?
NLog-NLog
.cs
@@ -46,13 +46,13 @@ namespace OpenTelemetry.Extensions.Hosting.Implementation } } - [Event(1, Message = "Failed to initialize: '{0}'. OpenTelemetry will not work.", Level = EventLevel.Error)] + [Event(1, Message = "An exception occurred while adding OpenTelemetry Tracing to ServiceCollection. OpenTelemetry tracing will not work. Exception: '{0}'.", Level = EventLevel.Error)] public void FailedInitialize(string exception) { this.WriteEvent(1, exception); } - [Event(2, Message = "Failed to get OpenTelemetrySDK: '{0}'. OpenTelemetry will not work.", Level = EventLevel.Error)] + [Event(2, Message = "An exception occurred while retrieving OpenTelemetry Tracer from Service Provider. OpenTelemetry tracing will not work. Exception: '{0}'.", Level = EventLevel.Error)] public void FailedOpenTelemetrySDK(string exception) { this.WriteEvent(2, exception);
1
// <copyright file="HostingExtensionsEventSource.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics.Tracing; using OpenTelemetry.Internal; namespace OpenTelemetry.Extensions.Hosting.Implementation { /// <summary> /// EventSource events emitted from the project. /// </summary> [EventSource(Name = "OpenTelemetry-Extensions-Hosting")] internal class HostingExtensionsEventSource : EventSource { public static HostingExtensionsEventSource Log = new HostingExtensionsEventSource(); [NonEvent] public void FailedInitialize(Exception ex) { if (this.IsEnabled(EventLevel.Error, (EventKeywords)(-1))) { this.FailedInitialize(ex.ToInvariantString()); } } [NonEvent] public void FailedOpenTelemetrySDK(Exception ex) { if (this.IsEnabled(EventLevel.Error, (EventKeywords)(-1))) { this.FailedOpenTelemetrySDK(ex.ToInvariantString()); } } [Event(1, Message = "Failed to initialize: '{0}'. OpenTelemetry will not work.", Level = EventLevel.Error)] public void FailedInitialize(string exception) { this.WriteEvent(1, exception); } [Event(2, Message = "Failed to get OpenTelemetrySDK: '{0}'. OpenTelemetry will not work.", Level = EventLevel.Error)] public void FailedOpenTelemetrySDK(string exception) { this.WriteEvent(2, exception); } } }
1
16,906
Happy to get suggestions for better messaging here. Here's what I intended to convey: something is wrong (the exception is a hint to the likely cause), and the impact is that tracing won't work.
open-telemetry-opentelemetry-dotnet
.cs
@@ -108,6 +108,11 @@ def start_acm(port=None, asynchronous=False): return start_moto_server('acm', port, name='ACM', asynchronous=asynchronous) +def start_ses(port=None, asynchronous=False, update_listener=None): + port = port or config.PORT_SES + return start_moto_server('ses', port, name='SES', asynchronous=asynchronous, update_listener=update_listener) + + # TODO move to es_starter.py? def start_elasticsearch_service(port=None, asynchronous=False): port = port or config.PORT_ES
1
# noqa import os import re import sys import json import time import signal import logging import traceback import boto3 import subprocess from moto import core as moto_core from requests.models import Response from localstack import config, constants from localstack.utils import common, persistence from localstack.constants import ( ENV_DEV, LOCALSTACK_VENV_FOLDER, LOCALSTACK_INFRA_PROCESS, DEFAULT_SERVICE_PORTS) from localstack.utils.common import (TMP_THREADS, run, get_free_tcp_port, is_linux, start_thread, ShellCommandThread, in_docker, is_port_open, sleep_forever, print_debug, edge_ports_info) from localstack.utils.server import multiserver from localstack.utils.testutil import is_local_test_mode from localstack.utils.bootstrap import ( setup_logging, canonicalize_api_names, load_plugins, in_ci) from localstack.utils.analytics import event_publisher from localstack.services import generic_proxy, install from localstack.services.es import es_api from localstack.services.plugins import SERVICE_PLUGINS, record_service_health, check_infra from localstack.services.firehose import firehose_api from localstack.services.awslambda import lambda_api from localstack.services.generic_proxy import GenericProxyHandler, ProxyListener, start_proxy_server from localstack.services.cloudformation import cloudformation_api from localstack.services.dynamodbstreams import dynamodbstreams_api from localstack.utils.analytics.profiler import log_duration # flag to indicate whether signal handlers have been set up already SIGNAL_HANDLERS_SETUP = False # output string that indicates that the stack is ready READY_MARKER_OUTPUT = 'Ready.' # default backend host address DEFAULT_BACKEND_HOST = '127.0.0.1' # maps ports to proxy listener details PROXY_LISTENERS = {} # set up logger LOG = logging.getLogger(__name__) # ----------------------- # CONFIG UPDATE BACKDOOR # ----------------------- def update_config_variable(variable, new_value): if new_value is not None: LOG.info('Updating value of config variable "%s": %s' % (variable, new_value)) setattr(config, variable, new_value) class ConfigUpdateProxyListener(ProxyListener): """ Default proxy listener that intercepts requests to retrieve or update config variables. 
""" def forward_request(self, method, path, data, headers): if path != constants.CONFIG_UPDATE_PATH or method != 'POST': return True response = Response() data = json.loads(data) variable = data.get('variable', '') response._content = '{}' response.status_code = 200 if not re.match(r'^[_a-zA-Z0-9]+$', variable): response.status_code = 400 return response new_value = data.get('value') update_config_variable(variable, new_value) value = getattr(config, variable, None) result = {'variable': variable, 'value': value} response._content = json.dumps(result) return response GenericProxyHandler.DEFAULT_LISTENERS.append(ConfigUpdateProxyListener()) # ----------------- # API ENTRY POINTS # ----------------- def start_sns(port=None, asynchronous=False, update_listener=None): port = port or config.PORT_SNS return start_moto_server('sns', port, name='SNS', asynchronous=asynchronous, update_listener=update_listener) def start_sts(port=None, asynchronous=False): port = port or config.PORT_STS return start_moto_server('sts', port, name='STS', asynchronous=asynchronous) def start_redshift(port=None, asynchronous=False): port = port or config.PORT_REDSHIFT return start_moto_server('redshift', port, name='Redshift', asynchronous=asynchronous) def start_acm(port=None, asynchronous=False): port = port or config.PORT_ACM return start_moto_server('acm', port, name='ACM', asynchronous=asynchronous) # TODO move to es_starter.py? def start_elasticsearch_service(port=None, asynchronous=False): port = port or config.PORT_ES return start_local_api('ES', port, api='es', method=es_api.serve, asynchronous=asynchronous) def start_firehose(port=None, asynchronous=False): port = port or config.PORT_FIREHOSE return start_local_api('Firehose', port, api='firehose', method=firehose_api.serve, asynchronous=asynchronous) def start_dynamodbstreams(port=None, asynchronous=False): port = port or config.PORT_DYNAMODBSTREAMS return start_local_api('DynamoDB Streams', port, api='dynamodbstreams', method=dynamodbstreams_api.serve, asynchronous=asynchronous) def start_lambda(port=None, asynchronous=False): port = port or config.PORT_LAMBDA return start_local_api('Lambda', port, api='lambda', method=lambda_api.serve, asynchronous=asynchronous) def start_cloudformation(port=None, asynchronous=False): port = port or config.PORT_CLOUDFORMATION return start_local_api('CloudFormation', port, api='cloudformation', method=cloudformation_api.serve, asynchronous=asynchronous) def start_ssm(port=None, asynchronous=False, update_listener=None): port = port or config.PORT_SSM return start_moto_server('ssm', port, name='SSM', asynchronous=asynchronous, update_listener=update_listener) # --------------- # HELPER METHODS # --------------- def patch_urllib3_connection_pool(**constructor_kwargs): """ Override the default parameters of HTTPConnectionPool, e.g., set the pool size via maxsize=16 """ try: from urllib3 import connectionpool, poolmanager class MyHTTPSConnectionPool(connectionpool.HTTPSConnectionPool): def __init__(self, *args, **kwargs): kwargs.update(constructor_kwargs) super(MyHTTPSConnectionPool, self).__init__(*args, **kwargs) poolmanager.pool_classes_by_scheme['https'] = MyHTTPSConnectionPool class MyHTTPConnectionPool(connectionpool.HTTPConnectionPool): def __init__(self, *args, **kwargs): kwargs.update(constructor_kwargs) super(MyHTTPConnectionPool, self).__init__(*args, **kwargs) poolmanager.pool_classes_by_scheme['http'] = MyHTTPConnectionPool except Exception: pass def patch_instance_tracker_meta(): """ Avoid instance collection for 
moto dashboard """ def new_instance(meta, name, bases, dct): cls = super(moto_core.models.InstanceTrackerMeta, meta).__new__(meta, name, bases, dct) if name == 'BaseModel': return cls cls.instances = [] return cls moto_core.models.InstanceTrackerMeta.__new__ = new_instance def new_basemodel(cls, *args, **kwargs): instance = super(moto_core.models.BaseModel, cls).__new__(cls) return instance moto_core.models.BaseModel.__new__ = new_basemodel def set_service_status(data): command = data.get('command') service = data.get('service') service_ports = config.parse_service_ports() if command == 'start': existing = service_ports.get(service) port = DEFAULT_SERVICE_PORTS.get(service) if existing: status = get_service_status(service, port) if status == 'running': return key_upper = service.upper().replace('-', '_') port_variable = 'PORT_%s' % key_upper service_list = os.environ.get('SERVICES', '').strip() services = [e for e in re.split(r'[\s,]+', service_list) if e] contained = [s for s in services if s.startswith(service)] if not contained: services.append(service) update_config_variable(port_variable, port) new_service_list = ','.join(services) os.environ['SERVICES'] = new_service_list config.populate_configs() LOG.info('Starting service %s on port %s' % (service, port)) SERVICE_PLUGINS[service].start(asynchronous=True) return {} def get_services_status(): result = {} for service, port in config.parse_service_ports().items(): status = get_service_status(service, port) result[service] = { 'port': port, 'status': status } return result def get_service_status(service, port=None): port = port or config.parse_service_ports().get(service) status = 'disabled' if (port or 0) <= 0 else 'running' if is_port_open(port) else 'stopped' return status def get_multiserver_or_free_service_port(): if config.FORWARD_EDGE_INMEM: return multiserver.get_moto_server_port() return get_free_tcp_port() def register_signal_handlers(): global SIGNAL_HANDLERS_SETUP if SIGNAL_HANDLERS_SETUP: return # register signal handlers def signal_handler(signal, frame): stop_infra() os._exit(0) signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) SIGNAL_HANDLERS_SETUP = True def do_run(cmd, asynchronous, print_output=None, env_vars={}): sys.stdout.flush() if asynchronous: if config.DEBUG and print_output is None: print_output = True outfile = subprocess.PIPE if print_output else None t = ShellCommandThread(cmd, outfile=outfile, env_vars=env_vars) t.start() TMP_THREADS.append(t) return t return run(cmd, env_vars=env_vars) def start_proxy_for_service(service_name, port, backend_port, update_listener, quiet=False, params={}): # TODO: remove special switch for Elasticsearch (see also note in service_port(...)
in config.py) if config.FORWARD_EDGE_INMEM and service_name != 'elasticsearch': if backend_port: PROXY_LISTENERS[service_name] = (service_name, backend_port, update_listener) return # check if we have a custom backend configured custom_backend_url = os.environ.get('%s_BACKEND' % service_name.upper()) backend_url = custom_backend_url or ('http://%s:%s' % (DEFAULT_BACKEND_HOST, backend_port)) return start_proxy(port, backend_url=backend_url, update_listener=update_listener, quiet=quiet, params=params) def start_proxy(port, backend_url, update_listener=None, quiet=False, params={}, use_ssl=None): use_ssl = config.USE_SSL if use_ssl is None else use_ssl proxy_thread = start_proxy_server(port=port, forward_url=backend_url, use_ssl=use_ssl, update_listener=update_listener, quiet=quiet, params=params) return proxy_thread def start_moto_server(key, port, name=None, backend_port=None, asynchronous=False, update_listener=None): if not name: name = key print('Starting mock %s service on %s ...' % (name, edge_ports_info())) if not backend_port: if config.FORWARD_EDGE_INMEM: backend_port = multiserver.get_moto_server_port() elif config.USE_SSL or update_listener: backend_port = get_free_tcp_port() if backend_port or config.FORWARD_EDGE_INMEM: start_proxy_for_service(key, port, backend_port, update_listener) if config.BUNDLE_API_PROCESSES: return multiserver.start_api_server(key, backend_port or port) return start_moto_server_separate(key, port, name=name, backend_port=backend_port, asynchronous=asynchronous) def start_moto_server_separate(key, port, name=None, backend_port=None, asynchronous=False): moto_server_cmd = '%s/bin/moto_server' % LOCALSTACK_VENV_FOLDER if not os.path.exists(moto_server_cmd): moto_server_cmd = run('which moto_server').strip() cmd = 'VALIDATE_LAMBDA_S3=0 %s %s -p %s -H %s' % (moto_server_cmd, key, backend_port or port, constants.BIND_HOST) return do_run(cmd, asynchronous) def start_local_api(name, port, api, method, asynchronous=False): print('Starting mock %s service on %s ...' 
% (name, edge_ports_info())) if config.FORWARD_EDGE_INMEM: port = get_free_tcp_port() PROXY_LISTENERS[api] = (api, port, None) if asynchronous: thread = start_thread(method, port, quiet=True) return thread else: method(port) def stop_infra(debug=False): if common.INFRA_STOPPED: return common.INFRA_STOPPED = True event_publisher.fire_event(event_publisher.EVENT_STOP_INFRA) generic_proxy.QUIET = True print_debug('[shutdown] Cleaning up files ...', debug) common.cleanup(files=True, quiet=True) print_debug('[shutdown] Cleaning up resources ...', debug) common.cleanup_resources(debug=debug) print_debug('[shutdown] Cleaning up Lambda resources ...', debug) lambda_api.cleanup() time.sleep(2) # TODO: optimize this (takes too long currently) # check_infra(retries=2, expect_shutdown=True) def check_aws_credentials(): session = boto3.Session() credentials = None # hardcode credentials here, to allow us to determine internal API calls made via boto3 os.environ['AWS_ACCESS_KEY_ID'] = constants.INTERNAL_AWS_ACCESS_KEY_ID os.environ['AWS_SECRET_ACCESS_KEY'] = constants.INTERNAL_AWS_ACCESS_KEY_ID try: credentials = session.get_credentials() except Exception: pass session = boto3.Session() credentials = session.get_credentials() assert credentials # ------------- # MAIN STARTUP # ------------- def start_infra(asynchronous=False, apis=None): try: os.environ[LOCALSTACK_INFRA_PROCESS] = '1' is_in_docker = in_docker() # print a warning if we're not running in Docker but using Docker based LAMBDA_EXECUTOR if not is_in_docker and 'docker' in config.LAMBDA_EXECUTOR and not is_linux(): print(('!WARNING! - Running outside of Docker with $LAMBDA_EXECUTOR=%s can lead to ' 'problems on your OS. The environment variable $LOCALSTACK_HOSTNAME may not ' 'be properly set in your Lambdas.') % config.LAMBDA_EXECUTOR) if is_in_docker and not config.LAMBDA_REMOTE_DOCKER and not os.environ.get('HOST_TMP_FOLDER'): print('!WARNING! 
- Looks like you have configured $LAMBDA_REMOTE_DOCKER=0 - ' "please make sure to configure $HOST_TMP_FOLDER to point to your host's $TMPDIR") # apply patches patch_urllib3_connection_pool(maxsize=128) patch_instance_tracker_meta() # load plugins load_plugins() # with plugins loaded, now start the infrastructure thread = do_start_infra(asynchronous, apis, is_in_docker) if not asynchronous and thread: # this is a bit of an ugly hack, but we need to make sure that we # stay in the execution context of the main thread, otherwise our # signal handlers don't work sleep_forever() return thread except KeyboardInterrupt: print('Shutdown') except Exception as e: print('Error starting infrastructure: %s %s' % (e, traceback.format_exc())) sys.stdout.flush() raise e finally: if not asynchronous: stop_infra() def do_start_infra(asynchronous, apis, is_in_docker): # import to avoid cyclic dependency from localstack.services.edge import BOOTSTRAP_LOCK event_publisher.fire_event(event_publisher.EVENT_START_INFRA, {'d': is_in_docker and 1 or 0, 'c': in_ci() and 1 or 0}) # set up logging setup_logging() # prepare APIs apis = canonicalize_api_names(apis) @log_duration() def prepare_environment(): # set environment os.environ['AWS_REGION'] = config.DEFAULT_REGION os.environ['ENV'] = ENV_DEV # register signal handlers if not is_local_test_mode(): register_signal_handlers() # make sure AWS credentials are configured, otherwise boto3 bails on us check_aws_credentials() @log_duration() def prepare_installation(): # install libs if not present install.install_components(apis) @log_duration() def start_api_services(): # Some services take a bit to come up sleep_time = 5 # start services thread = None # loop through plugins and start each service for name, plugin in SERVICE_PLUGINS.items(): if plugin.is_enabled(api_names=apis): record_service_health(name, 'starting') t1 = plugin.start(asynchronous=True) thread = thread or t1 time.sleep(sleep_time) # ensure that all infra components are up and running check_infra(apis=apis) # restore persisted data persistence.restore_persisted_data(apis=apis) return thread prepare_environment() prepare_installation() with BOOTSTRAP_LOCK: thread = start_api_services() print(READY_MARKER_OUTPUT) sys.stdout.flush() return thread
1
11,990
I think we can remove this function, right? (duplicate with `ses_starter.py`)
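For context, a minimal sketch of the reviewer's point, assuming `ses_starter.py` holds the same wrapper (its exact contents are not shown in this record): moto-backed services are thin one-line delegates to `start_moto_server`, so keeping a second copy of `start_ses` in `infra.py` would be redundant.

# Hypothetical contents of ses_starter.py (assumed from the review
# comment, not shown in this record): the same thin wrapper the patch
# adds to infra.py, which is why defining it twice adds nothing.
from localstack import config
from localstack.services.infra import start_moto_server


def start_ses(port=None, asynchronous=False, update_listener=None):
    # delegate to the generic moto-backed service starter
    port = port or config.PORT_SES
    return start_moto_server('ses', port, name='SES', asynchronous=asynchronous,
                             update_listener=update_listener)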
localstack-localstack
py
@@ -195,6 +195,10 @@ class PandasLikeSeries(_Frame): "Field {} not found, possible values are {}".format(name, ", ".join(fnames))) return anchor_wrap(self, self._spark_getField(name)) + # TODO: automate the process here + def alias(self, name): + return self.rename(name) + @property def schema(self): if not hasattr(self, '_pandas_schema') or self._pandas_schema is None:
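The `TODO: automate the process here` comment suggests generating such one-line delegates rather than hand-writing each wrapper. A hypothetical sketch of what that automation could look like (the helper name and delegate table are illustrative, not koalas code):

# Hypothetical helper: install simple delegating methods on a class
# from a name -> target table, so wrappers like alias -> rename don't
# have to be written out by hand.
_DELEGATES = {'alias': 'rename'}


def install_delegates(cls, table=_DELEGATES):
    for new_name, target in table.items():
        def make_delegate(target_name):
            def delegate(self, *args, **kwargs):
                # forward the call to the existing method
                return getattr(self, target_name)(*args, **kwargs)
            return delegate
        setattr(cls, new_name, make_delegate(target))
    return cls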
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Base classes to be monkey-patched to DataFrame/Column to behave similar to pandas DataFrame/Series. """ from functools import reduce import pandas as pd import numpy as np import pyspark.sql.functions as F from pyspark.sql import DataFrame, Column from pyspark.sql.types import FloatType, DoubleType, StructType, to_arrow_type from pyspark.sql.utils import AnalysisException from . import namespace from .metadata import Metadata from .selection import SparkDataFrameLocator from ._dask_stubs.utils import derived_from from ._dask_stubs.compatibility import string_types __all__ = ['PandasLikeSeries', 'PandasLikeDataFrame', 'SparkSessionPatches', 'anchor_wrap'] max_display_count = 1000 class SparkSessionPatches(object): """ Methods for :class:`SparkSession`. """ def from_pandas(self, pdf): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.all_fields df = self.createDataFrame(reset_index) df._metadata = metadata return df from_pandas.__doc__ = namespace.from_pandas.__doc__ def read_csv(self, path, header='infer', names=None, usecols=None, mangle_dupe_cols=True, parse_dates=False, comment=None): if mangle_dupe_cols is not True: raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols) if parse_dates is not False: raise ValueError("parse_dates can only be `False`: %s" % parse_dates) if usecols is not None and not callable(usecols): usecols = list(usecols) if usecols is None or callable(usecols) or len(usecols) > 0: reader = self.read.option("inferSchema", "true") if header == 'infer': header = 0 if names is None else None if header == 0: reader.option("header", True) elif header is None: reader.option("header", False) else: raise ValueError("Unknown header argument {}".format(header)) if comment is not None: if not isinstance(comment, string_types) or len(comment) != 1: raise ValueError("Only length-1 comment characters supported") reader.option("comment", comment) df = reader.csv(path) if header is None: df = df._spark_selectExpr(*["`%s` as `%s`" % (field.name, i) for i, field in enumerate(df.schema)]) if names is not None: names = list(names) if len(set(names)) != len(names): raise ValueError('Found non-unique column index') if len(names) != len(df.schema): raise ValueError('Names do not match the number of columns: %d' % len(names)) df = df._spark_selectExpr(*["`%s` as `%s`" % (field.name, name) for field, name in zip(df.schema, names)]) if usecols is not None: if callable(usecols): cols = [field.name for field in df.schema if usecols(field.name)] missing = [] elif all(isinstance(col, int) for col in usecols): cols = [field.name for i, field in enumerate(df.schema) if i in usecols] missing = [col for col in usecols if col >= len(df.schema) or df.schema[col].name not in cols] elif all(isinstance(col, string_types) for col in usecols): cols = [field.name for field in df.schema if field.name in usecols] missing = [col for col in usecols if col not in 
cols] else: raise ValueError("'usecols' must either be list-like of all strings, " "all unicode, all integers or a callable.") if len(missing) > 0: raise ValueError('Usecols do not match columns, columns expected but not ' 'found: %s' % missing) if len(cols) > 0: df = df._spark_select(cols) else: df = self.createDataFrame([], schema=StructType()) else: df = self.createDataFrame([], schema=StructType()) return df read_csv.__doc__ = namespace.read_csv.__doc__ def read_parquet(self, path, columns=None): if columns is not None: columns = list(columns) if columns is None or len(columns) > 0: df = self.read.parquet(path) if columns is not None: fields = [field.name for field in df.schema] cols = [col for col in columns if col in fields] if len(cols) > 0: df = df._spark_select(cols) else: df = self.createDataFrame([], schema=StructType()) else: df = self.createDataFrame([], schema=StructType()) return df read_parquet.__doc__ = namespace.read_parquet.__doc__ class _Frame(object): """ The base class for both dataframes and series. """ def max(self): return _reduce_spark(self, F.max) @derived_from(pd.DataFrame) def abs(self): """ Return a Series/DataFrame with absolute numeric value of each element. :return: :class:`Series` or :class:`DataFrame` with the absolute value of each element. """ return _spark_col_apply(self, F.abs) def compute(self): """Alias of `toPandas()` to mimic dask for easily porting tests.""" return self.toPandas() class PandasLikeSeries(_Frame): """ Methods that are appropriate for distributed series. """ def __init__(self): """ Define additional private fields. * ``_pandas_metadata``: The metadata which stores column fields, and index fields and names. * ``_spark_ref_dataframe``: The reference to the DataFrame anchored to this Column. * ``_pandas_schema``: The schema when representing this Column as a DataFrame.
""" self._pandas_metadata = None self._spark_ref_dataframe = None self._pandas_schema = None def astype(self, tpe): from .typing import as_spark_type spark_type = as_spark_type(tpe) if not spark_type: raise ValueError("Type {} not understood".format(tpe)) return anchor_wrap(self, self._spark_cast(spark_type)) def getField(self, name): if not isinstance(self.schema, StructType): raise AttributeError("Not a struct: {}".format(self.schema)) else: fnames = self.schema.fieldNames() if name not in fnames: raise AttributeError( "Field {} not found, possible values are {}".format(name, ", ".join(fnames))) return anchor_wrap(self, self._spark_getField(name)) @property def schema(self): if not hasattr(self, '_pandas_schema') or self._pandas_schema is None: self._pandas_schema = self.to_dataframe().schema return self._pandas_schema @property def shape(self): return len(self), @property def name(self): return self._jc.toString() @name.setter def name(self, name): self.rename(name, inplace=True) def rename(self, name, inplace=False): df = self.to_dataframe()._spark_select(self._metadata.index_fields + [self._spark_alias(name)]) df._metadata = self._metadata.copy(column_fields=[name]) col = _col(df) if inplace: anchor_wrap(col, self) self._jc = col._jc self._pandas_schema = None self._pandas_metadata = None return self else: return col @property def _metadata(self): if not hasattr(self, '_pandas_metadata') or self._pandas_metadata is None: ref = self._pandas_anchor self._pandas_metadata = ref._metadata.copy(column_fields=[self.name]) return self._pandas_metadata @derived_from(pd.Series) def reset_index(self, level=None, drop=False, name=None, inplace=False): if inplace and not drop: raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame') if name is not None: df = self.rename(name).to_dataframe() else: df = self.to_dataframe() df = df.reset_index(level=level, drop=drop) if drop: col = _col(df) if inplace: anchor_wrap(col, self) self._jc = col._jc self._pandas_schema = None self._pandas_metadata = None else: return col else: return df @property def loc(self): return SparkDataFrameLocator(self) def to_dataframe(self): ref = self._pandas_anchor df = ref._spark_select(self._metadata.index_fields + [self]) df._metadata = self._metadata.copy() return df def toPandas(self): return _col(self.to_dataframe().toPandas()) @derived_from(pd.Series) def isnull(self): if isinstance(self.schema[self.name].dataType, (FloatType, DoubleType)): return anchor_wrap(self, self._spark_isNull() | F._spark_isnan(self)) else: return anchor_wrap(self, self._spark_isNull()) isna = isnull @derived_from(pd.Series) def notnull(self): return ~self.isnull() notna = notnull @derived_from(pd.Series) def dropna(self, axis=0, inplace=False, **kwargs): col = _col(self.to_dataframe().dropna(axis=axis, inplace=False)) if inplace: anchor_wrap(col, self) self._jc = col._jc self._pandas_schema = None self._pandas_metadata = None else: return col def head(self, n=5): return _col(self.to_dataframe().head(n)) def unique(self): # Pandas wants a series/array-like object return _col(self.to_dataframe().unique()) @derived_from(pd.Series) def value_counts(self, normalize=False, sort=True, ascending=False, bins=None, dropna=True): if bins is not None: raise NotImplementedError("value_counts currently does not support bins") if dropna: df_dropna = self.to_dataframe()._spark_filter(self.notna()) else: df_dropna = self.to_dataframe() df = df_dropna._spark_groupby(self).count() if sort: if ascending: df = 
df._spark_orderBy(F._spark_col('count')) else: df = df._spark_orderBy(F._spark_col('count')._spark_desc()) if normalize: sum = df_dropna._spark_count() df = df._spark_withColumn('count', F._spark_col('count') / F._spark_lit(sum)) return _col(df.set_index([self.name])) @property def _pandas_anchor(self) -> DataFrame: """ The anchoring dataframe for this column (if any). :return: """ if hasattr(self, "_spark_ref_dataframe"): return self._spark_ref_dataframe n = self._pandas_orig_repr() raise ValueError("No reference to a dataframe for column {}".format(n)) def __len__(self): return len(self.to_dataframe()) def __getitem__(self, key): res = anchor_wrap(self, self._spark_getitem(key)) return res def __getattr__(self, item): if item.startswith("__") or item.startswith("_pandas_") or item.startswith("_spark_"): raise AttributeError(item) return anchor_wrap(self, self.getField(item)) def __invert__(self): return anchor_wrap(self, self._spark_cast("boolean") == F._spark_lit(False)) def __str__(self): return self._pandas_orig_repr() def __repr__(self): return repr(self.head(max_display_count).toPandas()) def __dir__(self): if not isinstance(self.schema, StructType): fields = [] else: fields = [f for f in self.schema.fieldNames() if ' ' not in f] return super(Column, self).__dir__() + fields def _pandas_orig_repr(self): # TODO: figure out how to reuse the original one. return 'Column<%s>' % self._jc.toString().encode('utf8') class PandasLikeDataFrame(_Frame): """ Methods that are relevant to dataframes. """ def __init__(self): """ Define additional private fields. * ``_pandas_metadata``: The metadata which stores column fields, and index fields and names. """ self._pandas_metadata = None @property def _metadata(self): if not hasattr(self, '_pandas_metadata') or self._pandas_metadata is None: self._pandas_metadata = Metadata(column_fields=self.schema.fieldNames()) return self._pandas_metadata @_metadata.setter def _metadata(self, metadata): self._pandas_metadata = metadata @property def _index_columns(self): return [anchor_wrap(self, self._spark_getitem(field)) for field in self._metadata.index_fields] @derived_from(pd.DataFrame) def iteritems(self): cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. 
:param keys: column label or list of column labels / arrays :param drop: boolean, default True Delete columns to be used as the new index :param append: boolean, default False Whether to append columns to existing index :param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if isinstance(keys, string_types): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: columns = [column for column in self._metadata.column_fields if column not in keys] else: columns = self._metadata.column_fields if append: index_info = self._metadata.index_info + [(column, column) for column in keys] else: index_info = [(column, column) for column in keys] metadata = self._metadata.copy(column_fields=columns, index_info=index_info) if inplace: self._metadata = metadata else: df = self.copy() df._metadata = metadata return df def reset_index(self, level=None, drop=False, inplace=False): """For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. :param level: int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default :param drop: boolean, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. :param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if len(self._metadata.index_info) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_info) > 1 if multi_index: rename = lambda i: 'level_{}'.format(i) else: rename = lambda i: \ 'index' if 'index' not in self._metadata.column_fields else 'level_{}'.format(i) if level is None: index_columns = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_info)] index_info = [] else: if isinstance(level, (int, string_types)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for l in level: if l >= len(self._metadata.index_info): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_info), l + 1)) idx = level elif all(isinstance(l, string_types) for l in level): idx = [] for l in level: try: i = self._metadata.index_fields.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_fields[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() index_columns = [] index_info = self._metadata.index_info.copy() for i in idx: info = self._metadata.index_info[i] column_field, index_name = info index_columns.append((column_field, index_name if index_name is not None else rename(index_name))) index_info.remove(info) if drop: index_columns = [] metadata = self._metadata.copy( column_fields=[column for column, _ in index_columns] + self._metadata.column_fields, index_info=index_info) columns = [name for _, name in index_columns] + self._metadata.column_fields if inplace: self._metadata = metadata self.columns = columns else: df = self.copy() df._metadata =
metadata df.columns = columns return df @derived_from(pd.DataFrame) def isnull(self): df = self.copy() for name, col in df.iteritems(): df[name] = col.isnull() return df isna = isnull @derived_from(pd.DataFrame) def notnull(self): df = self.copy() for name, col in df.iteritems(): df[name] = col.notnull() return df notna = notnull @derived_from(DataFrame) def toPandas(self): df = self._spark_select(self._metadata.all_fields) pdf = df._spark_toPandas() if len(pdf) == 0 and len(df.schema) > 0: # TODO: push to OSS pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in df.schema}) if len(self._metadata.index_info) > 0: append = False for index_field in self._metadata.index_fields: drop = index_field not in self._metadata.column_fields pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.column_fields] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf @derived_from(pd.DataFrame) def assign(self, **kwargs): for k, v in kwargs.items(): if not (isinstance(v, (Column,)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) df = self for (name, c) in pairs: df = df._spark_withColumn(name, c) df._metadata = self._metadata.copy( column_fields=(self._metadata.column_fields + [name for name, _ in pairs if name not in self._metadata.column_fields])) return df @property def loc(self): return SparkDataFrameLocator(self) def copy(self): df = DataFrame(self._jdf, self.sql_ctx) df._metadata = self._metadata.copy() return df @derived_from(pd.DataFrame) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, string_types): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.column_fields] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F._spark_when(self[column].notna(), 1)._spark_otherwise(0) for column in columns], F._spark_lit(0)) if thresh is not None: pred = cnt >= F._spark_lit(int(thresh)) elif how == 'any': pred = cnt == F._spark_lit(len(columns)) elif how == 'all': pred = cnt > F._spark_lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') df = self._spark_filter(pred) df._metadata = self._metadata.copy() if inplace: _reassign_jdf(self, df) else: return df else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def head(self, n=5): df = self._spark_limit(n) df._metadata = self._metadata.copy() return df @property def columns(self): return pd.Index(self._metadata.column_fields) @columns.setter def columns(self, names): old_names = self._metadata.column_fields if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(names))) df = self for (old_name, new_name) in zip(old_names, names): df = df._spark_withColumnRenamed(old_name, new_name) df._metadata = self._metadata.copy(column_fields=names) _reassign_jdf(self, df) def count(self): return self._spark_count() def unique(self): return 
DataFrame(self._jdf.distinct(), self.sql_ctx) @derived_from(pd.DataFrame) def drop(self, labels, axis=0, errors='raise'): axis = self._validate_axis(axis) if axis == 1: if isinstance(labels, list): df = self._spark_drop(*labels) df._metadata = self._metadata.copy( column_fields=[column for column in self._metadata.column_fields if column not in labels]) else: df = self._spark_drop(labels) df._metadata = self._metadata.copy( column_fields=[column for column in self._metadata.column_fields if column != labels]) return df # return self.map_partitions(M.drop, labels, axis=axis, errors=errors) raise NotImplementedError("Drop currently only works for axis=1") @derived_from(pd.DataFrame) def get(self, key, default=None): try: return anchor_wrap(self, self._pd_getitem(key)) except (KeyError, ValueError, IndexError): return default def sort_values(self, by): df = self._spark_sort(by) df._metadata = self._metadata return df def groupby(self, by): gp = self._spark_groupby(by) from .groups import PandasLikeGroupBy return PandasLikeGroupBy(self, gp, None) @derived_from(pd.DataFrame) def pipe(self, func, *args, **kwargs): # Taken from pandas: # https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707 if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, *args, **kwargs) @property def shape(self): return len(self), len(self.columns) def _pd_getitem(self, key): if key is None: raise KeyError("none key") if isinstance(key, string_types): try: return self._spark_getitem(key) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, string_types)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return self._spark_getitem(key) if isinstance(key, Column): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. bcol = key.cast("boolean") df = self._spark_getitem(bcol) df._metadata = self._metadata return anchor_wrap(self, df) raise NotImplementedError(key) def __getitem__(self, key): return anchor_wrap(self, self._pd_getitem(key)) def __setitem__(self, key, value): # For now, we don't support realignment against different dataframes. # This is too expensive in Spark. # Are we assigning against a column? 
if isinstance(value, Column): assert value._pandas_anchor is self,\ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() df = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: df = self.assign(**{key: value}) _reassign_jdf(self, df) def __getattr__(self, key): if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) return anchor_wrap(self, self._spark_getattr(key)) def __iter__(self): return self.toPandas().__iter__() def __len__(self): return self._spark_count() def __dir__(self): fields = [f for f in self.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields def _repr_html_(self): return self.head(max_display_count).toPandas()._repr_html_() @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) # convert to numeric axis return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reassign_jdf(target_df: DataFrame, new_df: DataFrame): """ Reassigns the java df content of a dataframe. """ target_df._jdf = new_df._jdf target_df._metadata = new_df._metadata # Reset the cached variables target_df._schema = None target_df._lazy_rdd = None def _spark_col_apply(col_or_df, sfun): """ Applies a function to all cells of a dataframe, the function being a known sql function. """ if isinstance(col_or_df, Column): return sfun(col_or_df) assert isinstance(col_or_df, DataFrame) df = col_or_df df = df._spark_select([sfun(df[col]).alias(col) for col in df.columns]) return df def _reduce_spark(col_or_df, sfun): """ Performs a reduction on a dataframe, the function being a known sql function. """ if isinstance(col_or_df, Column): col = col_or_df df0 = col._spark_ref_dataframe._spark_select(sfun(col)) else: assert isinstance(col_or_df, DataFrame) df = col_or_df df0 = df._spark_select(sfun("*")) return _unpack_scalar(df0) def _unpack_scalar(df): """ Takes a dataframe that is supposed to contain a single row with a single scalar value, and returns this value. """ l = df.head(2).collect() assert len(l) == 1, (df, l) row = l[0] l2 = list(row.asDict().values()) assert len(l2) == 1, (row, l2) return l2[0] def anchor_wrap(df, col): """ Ensures that the column has an anchoring reference to the dataframe. This is required to get self-representable columns. :param df: dataframe or column :param col: a column :return: column """ if isinstance(col, Column): if isinstance(df, Column): ref = df._pandas_anchor else: assert isinstance(df, DataFrame), type(df) ref = df col._spark_ref_dataframe = ref return col def _col(df): assert isinstance(df, (DataFrame, pd.DataFrame)), type(df) return df[df.columns[0]]
1
8,227
With this fix, I am not even sure we need to overwrite this function.
databricks-koalas
py
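For readers skimming this koalas record: the drop(axis=1) implementation in the oldf above pairs the Spark-side column drop with a matching filter over the cached column metadata, so _metadata.column_fields never lists a column the underlying Spark DataFrame no longer has. A minimal runnable sketch of that pattern, using a hypothetical stand-in Metadata class rather than the real koalas internals:

class Metadata:
    """Hypothetical stand-in for koalas' cached column metadata."""
    def __init__(self, column_fields):
        self.column_fields = list(column_fields)

    def copy(self, column_fields):
        return Metadata(column_fields)

def drop_columns(metadata, labels):
    # Mirrors the axis=1 branch above: accept a single label or a list,
    # then keep only the surviving columns in the copied metadata.
    labels = labels if isinstance(labels, list) else [labels]
    return metadata.copy(
        column_fields=[c for c in metadata.column_fields if c not in labels])

meta = Metadata(["a", "b", "c"])
print(drop_columns(meta, "b").column_fields)         # ['a', 'c']
print(drop_columns(meta, ["a", "c"]).column_fields)  # ['b']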
@@ -311,6 +311,8 @@ class PackageEntry(object): class Package(object): """ In-memory representation of a package """ + use_tqdm = os.getenv('QUILT_USE_TQDM').lower() == 'true' + def __init__(self): self._children = {} self._meta = {'version': 'v0'}
1
from collections import deque import gc import hashlib import io import json import pathlib import os import re import shutil import time from multiprocessing import Pool import uuid import warnings import jsonlines from tenacity import retry, stop_after_attempt, wait_exponential from tqdm import tqdm from .data_transfer import ( calculate_sha256, copy_file, copy_file_list, get_bytes, get_size_and_version, list_object_versions, list_url, put_bytes ) from .exceptions import PackageException from .formats import FormatRegistry from .telemetry import ApiTelemetry from .util import ( QuiltException, fix_url, get_from_config, get_install_location, validate_package_name, quiltignore_filter, validate_key, extract_file_extension, parse_sub_package_name) from .util import CACHE_PATH, TEMPFILE_DIR_PATH as APP_DIR_TEMPFILE_DIR, PhysicalKey, get_from_config, \ user_is_configured_to_custom_stack, catalog_package_url MAX_FIX_HASH_RETRIES = 3 def hash_file(readable_file): """ Returns SHA256 hash of readable file-like object """ buf = readable_file.read(4096) hasher = hashlib.sha256() while buf: hasher.update(buf) buf = readable_file.read(4096) return hasher.hexdigest() def _delete_local_physical_key(pk): assert pk.is_local(), "This function only works on files that live on a local disk" pathlib.Path(pk.path).unlink() def _filesystem_safe_encode(key): """Returns the sha256 of the key. This ensures there are no slashes, uppercase/lowercase conflicts, avoids `OSError: [Errno 36] File name too long:`, etc.""" return hashlib.sha256(key.encode()).hexdigest() class ObjectPathCache(object): @classmethod def _cache_path(cls, url): url_hash = _filesystem_safe_encode(url) return CACHE_PATH / url_hash[0:2] / url_hash[2:] @classmethod def get(cls, url): cache_path = cls._cache_path(url) try: with open(cache_path) as fd: path, dev, ino, mtime = json.load(fd) except (FileNotFoundError, ValueError): return None try: stat = pathlib.Path(path).stat() except FileNotFoundError: return None # check if device, file, and timestamp are unchanged => cache hit # see also https://docs.python.org/3/library/os.html#os.stat_result if stat.st_dev == dev and stat.st_ino == ino and stat.st_mtime_ns == mtime: return path else: return None @classmethod def set(cls, url, path): stat = pathlib.Path(path).stat() cache_path = cls._cache_path(url) cache_path.parent.mkdir(parents=True, exist_ok=True) with open(cache_path, 'w') as fd: json.dump([path, stat.st_dev, stat.st_ino, stat.st_mtime_ns], fd) @classmethod def clear(cls): shutil.rmtree(CACHE_PATH) class PackageEntry(object): """ Represents an entry at a logical key inside a package. """ __slots__ = ['physical_key', 'size', 'hash', '_meta'] def __init__(self, physical_key, size, hash_obj, meta): """ Creates an entry. Args: physical_key: a URI (either `s3://` or `file://`) size(number): size of object in bytes hash({'type': string, 'value': string}): hash object for example: {'type': 'SHA256', 'value': 'bb08a...'} meta(dict): metadata dictionary Returns: a PackageEntry """ assert isinstance(physical_key, PhysicalKey) self.physical_key = physical_key self.size = size self.hash = hash_obj self._meta = meta or {} def __eq__(self, other): return ( # Don't check physical keys. self.size == other.size and self.hash == other.hash and self._meta == other._meta ) def __repr__(self): return f"PackageEntry('{self.physical_key}')" def as_dict(self): """ Returns dict representation of entry. 
""" return { 'physical_keys': [str(self.physical_key)], 'size': self.size, 'hash': self.hash, 'meta': self._meta } @property def meta(self): return self._meta.get('user_meta', dict()) def set_meta(self, meta): """ Sets the user_meta for this PackageEntry. """ self._meta['user_meta'] = meta def _verify_hash(self, read_bytes): """ Verifies hash of bytes """ if self.hash is None: raise QuiltException("Hash missing - need to build the package") if self.hash.get('type') != 'SHA256': raise NotImplementedError digest = hashlib.sha256(read_bytes).hexdigest() if digest != self.hash.get('value'): raise QuiltException("Hash validation failed") def set(self, path=None, meta=None): """ Returns self with the physical key set to path. Args: logical_key(string): logical key to update path(string): new path to place at logical_key in the package Currently only supports a path on local disk meta(dict): metadata dict to attach to entry. If meta is provided, set just updates the meta attached to logical_key without changing anything else in the entry Returns: self """ if path is not None: self.physical_key = PhysicalKey.from_url(fix_url(path)) self.size = None self.hash = None elif meta is not None: self.set_meta(meta) else: raise PackageException('Must specify either path or meta') def get(self): """ Returns the physical key of this PackageEntry. """ return str(self.physical_key) def get_cached_path(self): """ Returns a locally cached physical key, if available. """ if not self.physical_key.is_local(): return ObjectPathCache.get(str(self.physical_key)) return None def get_bytes(self, use_cache_if_available=True): """ Returns the bytes of the object this entry corresponds to. If 'use_cache_if_available'=True, will first try to retrieve the bytes from cache. """ if use_cache_if_available: cached_path = self.get_cached_path() if cached_path is not None: return get_bytes(PhysicalKey(None, cached_path, None)) data = get_bytes(self.physical_key) return data def get_as_json(self, use_cache_if_available=True): """ Returns a JSON file as a `dict`. Assumes that the file is encoded using utf-8. If 'use_cache_if_available'=True, will first try to retrieve the object from cache. """ obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available) return json.loads(obj_bytes.decode("utf-8")) def get_as_string(self, use_cache_if_available=True): """ Return the object as a string. Assumes that the file is encoded using utf-8. If 'use_cache_if_available'=True, will first try to retrieve the object from cache. """ obj_bytes = self.get_bytes(use_cache_if_available=use_cache_if_available) return obj_bytes.decode("utf-8") def deserialize(self, func=None, **format_opts): """ Returns the object this entry corresponds to. Args: func: Skip normal deserialization process, and call func(bytes), returning the result directly. **format_opts: Some data formats may take options. Though normally handled by metadata, these can be overridden here. Returns: The deserialized object from the logical_key Raises: physical key failure hash verification fail when deserialization metadata is not present """ data = get_bytes(self.physical_key) if func is not None: return func(data) pkey_ext = pathlib.PurePosixPath(self.physical_key.path).suffix # Verify format can be handled before checking hash. Raises if none found. formats = FormatRegistry.search(None, self._meta, pkey_ext) # Verify hash before deserializing.. 
self._verify_hash(data) return formats[0].deserialize(data, self._meta, pkey_ext, **format_opts) def fetch(self, dest=None): """ Gets objects from entry and saves them to dest. Args: dest: where to put the files Defaults to the entry name Returns: None """ if dest is None: name = self.physical_key.basename() dest = PhysicalKey.from_path('.').join(name) else: dest = PhysicalKey.from_url(fix_url(dest)) copy_file(self.physical_key, dest) # return a package reroot package physical keys after the copy operation succeeds # see GH#388 for context return self.with_physical_key(dest) def __call__(self, func=None, **kwargs): """ Shorthand for self.deserialize() """ return self.deserialize(func=func, **kwargs) def with_physical_key(self, key): return self.__class__(key, self.size, self.hash, self._meta) @property def physical_keys(self): """ Deprecated """ return [self.physical_key] class Package(object): """ In-memory representation of a package """ def __init__(self): self._children = {} self._meta = {'version': 'v0'} @ApiTelemetry("package.__repr__") def __repr__(self, max_lines=20): """ String representation of the Package. """ def _create_str(results_dict, level=0, parent=True): """ Creates a string from the results dict """ result = '' keys = sorted(results_dict.keys()) if not keys: return result if parent: has_remote_entries = any( self._map( lambda lk, entry: not entry.physical_key.is_local() ) ) pkg_type = 'remote' if has_remote_entries else 'local' result = f'({pkg_type} Package)\n' for key in keys: result += ' ' + (' ' * level) + '└─' + key + '\n' result += _create_str(results_dict[key], level + 1, parent=False) return result if not self.keys(): return '(empty Package)' # traverse the tree of package directories and entries to get the list of # display objects. candidates is a deque of shape # ((logical_key, Package | PackageEntry), [list of parent key]) candidates = deque(([x, []] for x in self._children.items())) results_dict = {} results_total = 0 more_objects_than_lines = False while candidates: [[logical_key, entry], parent_keys] = candidates.popleft() if isinstance(entry, Package): logical_key = logical_key + '/' new_parent_keys = parent_keys.copy() new_parent_keys.append(logical_key) for child_key in sorted(entry.keys()): candidates.append([[child_key, entry[child_key]], new_parent_keys]) current_result_level = results_dict for key in parent_keys: current_result_level = current_result_level[key] current_result_level[logical_key] = {} results_total += 1 if results_total >= max_lines: more_objects_than_lines = True break repr_str = _create_str(results_dict) # append '...' if the package is larger than max_size if more_objects_than_lines: repr_str += ' ' + '...\n' return repr_str @property def meta(self): return self._meta.get('user_meta', dict()) @classmethod @ApiTelemetry("package.install") def install(cls, name, registry=None, top_hash=None, dest=None, dest_registry=None): """ Installs a named package to the local registry and downloads its files. Args: name(str): Name of package to install. It also can be passed as NAME/PATH, in this case only the sub-package or the entry specified by PATH will be downloaded. registry(str): Registry where package is located. Defaults to the default remote registry. top_hash(str): Hash of package to install. Defaults to latest. dest(str): Local path to download files to. dest_registry(str): Registry to install package to. Defaults to local registry. 
""" if registry is None: registry = get_from_config('default_remote_registry') if registry is None: raise QuiltException( "No registry specified and no default_remote_registry configured. Please " "specify a registry or configure a default remote registry with quilt3.config" ) else: registry = fix_url(registry) registry_parsed = PhysicalKey.from_url(registry) if dest_registry is None: dest_registry = get_from_config('default_local_registry') else: dest_registry = fix_url(dest_registry) dest_registry_parsed = PhysicalKey.from_url(dest_registry) if not dest_registry_parsed.is_local(): raise QuiltException( f"Can only 'install' to a local registry, but 'dest_registry' " f"{dest_registry!r} is a remote path. To store a package in a remote " f"registry, use 'push' or 'build' instead." ) if dest is None: dest_parsed = PhysicalKey.from_url(get_install_location()).join(name) else: dest_parsed = PhysicalKey.from_url(fix_url(dest)) if not dest_parsed.is_local(): raise QuiltException( f"Invalid package destination path {dest!r}. 'dest', if set, must point at " f"the local filesystem. To copy a package to a remote registry use 'push' or " f"'build' instead." ) parts = parse_sub_package_name(name) if parts and parts[1]: name, subpkg_key = parts validate_key(subpkg_key) else: subpkg_key = None pkg = cls._browse(name=name, registry=registry, top_hash=top_hash) message = pkg._meta.get('message', None) # propagate the package message file_list = [] if subpkg_key is not None: if subpkg_key not in pkg: raise QuiltException(f"Package {name} doesn't contain {subpkg_key!r}.") entry = pkg[subpkg_key] entries = entry.walk() if isinstance(entry, Package) else ((subpkg_key.split('/')[-1], entry),) else: entries = pkg.walk() for logical_key, entry in entries: # Copy the datafiles in the package. physical_key = entry.physical_key # Try a local cache. cached_file = ObjectPathCache.get(str(physical_key)) if cached_file is not None: physical_key = PhysicalKey.from_path(cached_file) new_physical_key = dest_parsed.join(logical_key) if physical_key != new_physical_key: file_list.append((physical_key, new_physical_key, entry.size)) def _maybe_add_to_cache(old: PhysicalKey, new: PhysicalKey, _): if not old.is_local() and new.is_local(): ObjectPathCache.set(str(old), new.path) copy_file_list(file_list, callback=_maybe_add_to_cache, message="Copying objects") pkg._build(name, registry=dest_registry, message=message) if top_hash is None: top_hash = pkg.top_hash short_tophash = Package._shorten_tophash(name, dest_registry_parsed, top_hash) print(f"Successfully installed package '{name}', tophash={short_tophash} from {registry}") @classmethod def resolve_hash(cls, registry, hash_prefix): """ Find a hash that starts with a given prefix. 
Args: registry(string): location of registry hash_prefix(string): hash prefix with length between 6 and 64 characters """ assert isinstance(registry, PhysicalKey) if len(hash_prefix) == 64: top_hash = hash_prefix elif 6 <= len(hash_prefix) < 64: matching_hashes = [h for h, _ in list_url(registry.join('.quilt/packages/')) if h.startswith(hash_prefix)] if not matching_hashes: raise QuiltException("Found zero matches for %r" % hash_prefix) elif len(matching_hashes) > 1: raise QuiltException("Found multiple matches: %r" % hash_prefix) else: top_hash = matching_hashes[0] else: raise QuiltException("Invalid hash: %r" % hash_prefix) return top_hash @classmethod def _shorten_tophash(cls, package_name, registry: PhysicalKey, top_hash): min_shorthash_len = 7 matches = [h for h, _ in list_url(registry.join('.quilt/packages/')) if h.startswith(top_hash[:min_shorthash_len])] if len(matches) == 0: raise ValueError(f"Tophash {top_hash} was not found in registry {registry}") for prefix_length in range(min_shorthash_len, 64): potential_shorthash = top_hash[:prefix_length] matches = [h for h in matches if h.startswith(potential_shorthash)] if len(matches) == 1: return potential_shorthash @classmethod @ApiTelemetry("package.browse") def browse(cls, name, registry=None, top_hash=None): """ Load a package into memory from a registry without making a local copy of the manifest. Args: name(string): name of package to load registry(string): location of registry to load package from top_hash(string): top hash of package version to load """ return cls._browse(name=name, registry=registry, top_hash=top_hash) @classmethod def _browse(cls, name, registry=None, top_hash=None): validate_package_name(name) if registry is None: registry = get_from_config('default_local_registry') else: registry = fix_url(registry) registry_parsed = PhysicalKey.from_url(registry) if top_hash is None: top_hash_file = registry_parsed.join(f'.quilt/named_packages/{name}/latest') top_hash = get_bytes(top_hash_file).decode('utf-8').strip() else: top_hash = cls.resolve_hash(registry_parsed, top_hash) # TODO: verify that name is correct with respect to this top_hash pkg_manifest = registry_parsed.join(f'.quilt/packages/{top_hash}') if pkg_manifest.is_local(): local_pkg_manifest = pkg_manifest.path else: local_pkg_manifest = CACHE_PATH / "manifest" / _filesystem_safe_encode(str(pkg_manifest)) if not local_pkg_manifest.exists(): # Copy to a temporary file first, to make sure we don't cache a truncated file # if the download gets interrupted. tmp_path = local_pkg_manifest.with_suffix('.tmp') copy_file(pkg_manifest, PhysicalKey.from_path(tmp_path), message="Downloading manifest") tmp_path.rename(local_pkg_manifest) return cls._from_path(local_pkg_manifest) @classmethod def _from_path(cls, path): """ Takes a path and returns a package loaded from that path""" with open(path) as open_file: pkg = cls._load(open_file) return pkg @classmethod def _split_key(cls, logical_key): """ Converts a string logical key like 'a/b/c' into a list of ['a', 'b', 'c']. Returns the original key if it's already a list or a tuple. """ if isinstance(logical_key, str): path = logical_key.split('/') elif isinstance(logical_key, (tuple, list)): path = logical_key else: raise TypeError('Invalid logical_key: %r' % logical_key) return path def __contains__(self, logical_key): """ Checks whether the package contains a specified logical_key. 
Returns: True or False """ try: self[logical_key] return True except KeyError: return False def __getitem__(self, logical_key): """ Filters the package based on prefix, and returns either a new Package or a PackageEntry. Args: prefix(str): prefix to filter on Returns: PackageEntry if prefix matches a logical_key exactly otherwise Package """ pkg = self for key_fragment in self._split_key(logical_key): pkg = pkg._children[key_fragment] return pkg @ApiTelemetry("package.fetch") def fetch(self, dest='./'): """ Copy all descendants to `dest`. Descendants are written under their logical names _relative_ to self. Args: dest: where to put the files (locally) Returns: A new Package object with entries from self, but with physical keys pointing to files in `dest`. """ nice_dest = PhysicalKey.from_url(fix_url(dest)) file_list = [] pkg = Package() for logical_key, entry in self.walk(): physical_key = entry.physical_key new_physical_key = nice_dest.join(logical_key) file_list.append((physical_key, new_physical_key, entry.size)) # return a package reroot package physical keys after the copy operation succeeds # see GH#388 for context new_entry = entry.with_physical_key(new_physical_key) pkg._set(logical_key, new_entry) copy_file_list(file_list, message="Copying objects") return pkg def keys(self): """ Returns logical keys in the package. """ return self._children.keys() def __iter__(self): return iter(self._children) def __len__(self): return len(self._children) def walk(self): """ Generator that traverses all entries in the package tree and returns tuples of (key, entry), with keys in alphabetical order. """ for name, child in sorted(self._children.items()): if isinstance(child, PackageEntry): yield name, child else: for key, value in child.walk(): yield name + '/' + key, value def _walk_dir_meta(self): """ Generator that traverses all entries in the package tree and returns tuples of (key, meta) for each directory with metadata. Keys will all end in '/' to indicate that they are directories. """ for key, child in sorted(self._children.items()): if isinstance(child, PackageEntry): continue meta = child.meta if meta: yield key + '/', meta for child_key, child_meta in child._walk_dir_meta(): yield key + '/' + child_key, child_meta @classmethod @ApiTelemetry("package.load") def load(cls, readable_file): """ Loads a package from a readable file-like object. 
Args: readable_file: readable file-like object to deserialize package from Returns: A new Package object Raises: file not found json decode error invalid package exception """ return cls._load(readable_file=readable_file) @classmethod def _load(cls, readable_file): gc.disable() # Experiments with COCO (650MB manifest) show disabling GC gives us ~2x performance improvement try: line_count = 0 for _ in readable_file: line_count += 1 readable_file.seek(0) reader = jsonlines.Reader(readable_file, loads=json.loads) with tqdm(desc="Loading manifest", total=line_count, unit="entries") as tqdm_progress: meta = reader.read() meta.pop('top_hash', None) # Obsolete as of PR #130 pkg = cls() pkg._meta = meta tqdm_progress.update(1) for obj in reader: path = cls._split_key(obj.pop('logical_key')) subpkg = pkg._ensure_subpackage(path[:-1]) key = path[-1] if not obj.get('physical_keys', None): # directory-level metadata subpkg.set_meta(obj['meta']) continue if key in subpkg._children: raise PackageException("Duplicate logical key while loading package") subpkg._children[key] = PackageEntry( PhysicalKey.from_url(obj['physical_keys'][0]), obj['size'], obj['hash'], obj['meta'] ) tqdm_progress.update(1) finally: gc.enable() return pkg def set_dir(self, lkey, path=None, meta=None): """ Adds all files from `path` to the package. Recursively enumerates every file in `path`, and adds them to the package according to their relative location to `path`. Args: lkey(string): prefix to add to every logical key, use '/' for the root of the package. path(string): path to scan for files to add to package. If None, lkey will be substituted in as the path. meta(dict): user level metadata dict to attach to lkey directory entry. Returns: self Raises: When `path` doesn't exist """ lkey = lkey.strip("/") if not lkey or lkey == '.' or lkey == './': root = self else: validate_key(lkey) root = self._ensure_subpackage(self._split_key(lkey)) root.set_meta(meta) if path: src = PhysicalKey.from_url(fix_url(path)) else: src = PhysicalKey.from_path(lkey) # TODO: deserialization metadata if src.is_local(): src_path = pathlib.Path(src.path) if not src_path.is_dir(): raise PackageException("The specified directory doesn't exist") files = src_path.rglob('*') ignore = src_path / '.quiltignore' if ignore.exists(): files = quiltignore_filter(files, ignore, 'file') for f in files: if not f.is_file(): continue entry = PackageEntry(PhysicalKey.from_path(f), f.stat().st_size, None, None) logical_key = f.relative_to(src_path).as_posix() root._set(logical_key, entry) else: if src.version_id is not None: raise PackageException("Directories cannot have versions") src_path = src.path if src.basename() != '': src_path += '/' objects, _ = list_object_versions(src.bucket, src_path) for obj in objects: if not obj['IsLatest']: continue # Skip S3 pseudo directory files and Keys that end in / if obj['Key'].endswith('/'): if obj['Size'] != 0: warnings.warn(f'Logical keys cannot end in "/", skipping: {obj["Key"]}') continue obj_pk = PhysicalKey(src.bucket, obj['Key'], obj.get('VersionId')) entry = PackageEntry(obj_pk, obj['Size'], None, None) logical_key = obj['Key'][len(src_path):] root._set(logical_key, entry) return self def get(self, logical_key): """ Gets object from logical_key and returns its physical path. Equivalent to self[logical_key].get(). Args: logical_key(string): logical key of the object to get Returns: Physical path as a string.
Raises: KeyError: when logical_key is not present in the package ValueError: if the logical_key points to a Package rather than PackageEntry. """ obj = self[logical_key] if not isinstance(obj, PackageEntry): raise ValueError("Key does not point to a PackageEntry") return obj.get() def readme(self): """ Returns the README PackageEntry The README is the entry with the logical key 'README.md' (case-sensitive). Will raise a QuiltException if no such entry exists. """ if "README.md" not in self: ex_msg = f"This Package is missing a README file. A Quilt recognized README file is a file named " \ f"'README.md' (case-sensitive)" raise QuiltException(ex_msg) return self["README.md"] def set_meta(self, meta): """ Sets user metadata on this Package. """ self._meta['user_meta'] = meta return self @retry(stop=stop_after_attempt(MAX_FIX_HASH_RETRIES), wait=wait_exponential(multiplier=1, min=1, max=10), reraise=True) def _fix_sha256(self): """ Calculate and set missing hash values """ self._incomplete_entries = [entry for key, entry in self.walk() if entry.hash is None] physical_keys = [] sizes = [] for entry in self._incomplete_entries: physical_keys.append(entry.physical_key) sizes.append(entry.size) results = calculate_sha256(physical_keys, sizes) entries_w_missing_hash = [] for entry, obj_hash in zip(self._incomplete_entries, results): if obj_hash is None: entries_w_missing_hash.append(entry) else: entry.hash = dict(type='SHA256', value=obj_hash) self._incomplete_entries = entries_w_missing_hash if self._incomplete_entries: incomplete_manifest_path = self._dump_manifest_to_scratch() msg = "Unable to reach S3 for some hash values. Incomplete manifest saved to {path}." raise PackageException(msg.format(path=incomplete_manifest_path)) def _set_commit_message(self, msg): """ Sets a commit message. Args: msg: a message string Returns: None Raises: a ValueError if msg is not a string """ if msg is not None and not isinstance(msg, str): raise ValueError( f"The package commit message must be a string, but the message provided is an " f"instance of {type(msg)}." ) self._meta.update({'message': msg}) def _dump_manifest_to_scratch(self): registry = get_from_config('default_local_registry') registry_parsed = PhysicalKey.from_url(registry) pkg_manifest_file = registry_parsed.join("scratch").join(str(int(time.time()))) manifest = io.BytesIO() self._dump(manifest) put_bytes( manifest.getvalue(), pkg_manifest_file ) return pkg_manifest_file.path @ApiTelemetry("package.build") def build(self, name, registry=None, message=None): """ Serializes this package to a registry. Args: name: optional name for package registry: registry to build to defaults to local registry message: the commit message of the package Returns: The top hash as a string.
""" return self._build(name=name, registry=registry, message=message) def _build(self, name, registry, message): validate_package_name(name) if registry is None: registry = get_from_config('default_local_registry') else: registry = fix_url(registry) registry_parsed = PhysicalKey.from_url(registry) self._set_commit_message(message) self._fix_sha256() manifest = io.BytesIO() self._dump(manifest) pkg_manifest_file = registry_parsed.join(f'.quilt/packages/{self.top_hash}') put_bytes( manifest.getvalue(), pkg_manifest_file ) named_path = registry_parsed.join(f'.quilt/named_packages/{name}') hash_bytes = self.top_hash.encode('utf-8') # TODO: use a float to string formatter instead of double casting timestamp_path = named_path.join(str(int(time.time()))) latest_path = named_path.join("latest") put_bytes(hash_bytes, timestamp_path) put_bytes(hash_bytes, latest_path) return self @ApiTelemetry("package.dump") def dump(self, writable_file): """ Serializes this package to a writable file-like object. Args: writable_file: file-like object to write serialized package. Returns: None Raises: fail to create file fail to finish write """ return self._dump(writable_file) def _dump(self, writable_file): writer = jsonlines.Writer(writable_file) for line in self.manifest: writer.write(line) @property def manifest(self): """ Provides a generator of the dicts that make up the serialized package. """ yield self._meta for dir_key, meta in self._walk_dir_meta(): yield {'logical_key': dir_key, 'meta': meta} for logical_key, entry in self.walk(): yield {'logical_key': logical_key, **entry.as_dict()} def set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None): """ Returns self with the object at logical_key set to entry. Args: logical_key(string): logical key to update entry(PackageEntry OR string OR object): new entry to place at logical_key in the package. If entry is a string, it is treated as a URL, and an entry is created based on it. If entry is None, the logical key string will be substituted as the entry value. If entry is an object and quilt knows how to serialize it, it will immediately be serialized and written to disk, either to serialization_location or to a location managed by quilt. List of types that Quilt can serialize is available by calling `quilt3.formats.FormatRegistry.all_supported_formats()` meta(dict): user level metadata dict to attach to entry serialization_format_opts(dict): Optional. If passed in, only used if entry is an object. Options to help Quilt understand how the object should be serialized. Useful for underspecified file formats like csv when content contains confusing characters. Will be passed as kwargs to the FormatHandler.serialize() function. See docstrings for individual FormatHandlers for full list of options - https://github.com/quiltdata/quilt/blob/master/api/python/quilt3/formats.py serialization_location(string): Optional. If passed in, only used if entry is an object. Where the serialized object should be written, e.g. "./mydataframe.parquet" Returns: self """ return self._set(logical_key=logical_key, entry=entry, meta=meta, serialization_location=serialization_location, serialization_format_opts=serialization_format_opts) def _set(self, logical_key, entry=None, meta=None, serialization_location=None, serialization_format_opts=None): if not logical_key or logical_key.endswith('/'): raise QuiltException( f"Invalid logical key {logical_key!r}. " f"A package entry logical key cannot be a directory."
) validate_key(logical_key) if entry is None: entry = pathlib.Path(logical_key).resolve().as_uri() if isinstance(entry, (str, os.PathLike)): src = PhysicalKey.from_url(fix_url(str(entry))) size, version_id = get_size_and_version(src) # Determine if a new version needs to be appended. if not src.is_local() and src.version_id is None and version_id is not None: src.version_id = version_id entry = PackageEntry(src, size, None, None) elif isinstance(entry, PackageEntry): assert meta is None elif FormatRegistry.object_is_serializable(entry): # Use file extension from serialization_location, fall back to file extension from logical_key # If neither has a file extension, Quilt picks the serialization format. logical_key_ext = extract_file_extension(logical_key) serialize_loc_ext = None if serialization_location is not None: serialize_loc_ext = extract_file_extension(serialization_location) if logical_key_ext is not None and serialize_loc_ext is not None: assert logical_key_ext == serialize_loc_ext, f"The logical_key and the serialization_location have " \ f"different file extensions: {logical_key_ext} vs " \ f"{serialize_loc_ext}. Quilt doesn't know which to use!" if serialize_loc_ext is not None: ext = serialize_loc_ext elif logical_key_ext is not None: ext = logical_key_ext else: ext = None format_handlers = FormatRegistry.search(type(entry)) if ext: format_handlers = [f for f in format_handlers if ext in f.handled_extensions] if len(format_handlers) == 0: error_message = f'Quilt does not know how to serialize a {type(entry)}' if ext is not None: error_message += f' as a {ext} file.' error_message += f'. If you think this should be supported, please open an issue or PR at ' \ f'https://github.com/quiltdata/quilt' raise QuiltException(error_message) if serialization_format_opts is None: serialization_format_opts = {} serialized_object_bytes, new_meta = format_handlers[0].serialize(entry, meta=None, ext=ext, **serialization_format_opts) if serialization_location is None: serialization_path = APP_DIR_TEMPFILE_DIR / str(uuid.uuid4()) if ext: serialization_path = serialization_path.with_suffix(f'.{ext}') else: serialization_path = pathlib.Path(serialization_location).expanduser().resolve() serialization_path.parent.mkdir(exist_ok=True, parents=True) serialization_path.write_bytes(serialized_object_bytes) size = serialization_path.stat().st_size write_pk = PhysicalKey.from_path(serialization_path) entry = PackageEntry(write_pk, size, hash_obj=None, meta=new_meta) else: raise TypeError(f"Expected a string for entry, but got an instance of {type(entry)}.") if meta is not None: entry.set_meta(meta) path = self._split_key(logical_key) pkg = self._ensure_subpackage(path[:-1], ensure_no_entry=True) if path[-1] in pkg and isinstance(pkg[path[-1]], Package): raise QuiltException("Cannot overwrite directory with PackageEntry") pkg._children[path[-1]] = entry return self def _ensure_subpackage(self, path, ensure_no_entry=False): """ Creates a package and any intermediate packages at the given path. Args: path(list): logical key as a list or tuple ensure_no_entry(boolean): if True, throws if this would overwrite a PackageEntry that already exists in the tree. 
Returns: newly created or existing package at that path """ pkg = self for key_fragment in path: if ensure_no_entry and key_fragment in pkg \ and isinstance(pkg[key_fragment], PackageEntry): raise QuiltException("Already a PackageEntry along the path.") pkg = pkg._children.setdefault(key_fragment, Package()) return pkg def delete(self, logical_key): """ Returns the package with logical_key removed. Returns: self Raises: KeyError: when logical_key is not present to be deleted """ path = self._split_key(logical_key) pkg = self[path[:-1]] del pkg._children[path[-1]] return self @property def top_hash(self): """ Returns the top hash of the package. Note that physical keys are not hashed because the package has the same semantics regardless of where the bytes come from. Returns: A string that represents the top hash of the package """ top_hash = hashlib.sha256() assert 'top_hash' not in self._meta top_meta = json.dumps(self._meta, sort_keys=True, separators=(',', ':')) top_hash.update(top_meta.encode('utf-8')) for logical_key, entry in self.walk(): if entry.hash is None or entry.size is None: raise QuiltException( "PackageEntry missing hash and/or size: %s" % entry.physical_key ) entry_dict = entry.as_dict() entry_dict['logical_key'] = logical_key entry_dict.pop('physical_keys', None) entry_dict_str = json.dumps(entry_dict, sort_keys=True, separators=(',', ':')) top_hash.update(entry_dict_str.encode('utf-8')) return top_hash.hexdigest() @ApiTelemetry("package.push") def push(self, name, registry=None, dest=None, message=None, selector_fn=lambda logical_key, package_entry: True): """ Copies objects to path, then creates a new package that points to those objects. Copies each object in this package to path according to logical key structure, then adds to the registry a serialized version of this package with physical keys that point to the new copies. Note that push is careful to not push data unnecessarily. To illustrate, imagine you have a PackageEntry: `pkg["entry_1"].physical_key = "/tmp/package_entry_1.json"` If that entry would be pushed to `s3://bucket/prefix/entry_1.json`, but `s3://bucket/prefix/entry_1.json` already contains the exact same bytes as '/tmp/package_entry_1.json', `quilt3` will not push the bytes to s3, no matter what `selector_fn('entry_1', pkg["entry_1"])` returns. However, selector_fn will dictate whether the new package points to the local file or to s3: If `selector_fn('entry_1', pkg["entry_1"]) == False`, `new_pkg["entry_1"] = ["/tmp/package_entry_1.json"]` If `selector_fn('entry_1', pkg["entry_1"]) == True`, `new_pkg["entry_1"] = ["s3://bucket/prefix/entry_1.json"]` Args: name: name for package in registry dest: where to copy the objects in the package registry: registry where to create the new package message: the commit message for the new package selector_fn: An optional function that determines which package entries should be copied to S3. The function takes in two arguments, logical_key and package_entry, and should return False if that PackageEntry should be skipped during push. If for example you have a package where the files are spread over multiple buckets and you add a single local file, you can use selector_fn to only push the local file to s3 (instead of pushing all data to the destination bucket). Returns: A new package that points to the copied objects. 
""" validate_package_name(name) if registry is None: registry = get_from_config('default_remote_registry') if registry is None: raise QuiltException( "No registry specified and no default remote registry configured. Please " "specify a registry or configure a default remote registry with quilt3.config" ) registry_parsed = PhysicalKey.from_url(fix_url(registry)) else: registry_parsed = PhysicalKey.from_url(fix_url(registry)) if not registry_parsed.is_local(): if registry_parsed.path != '': raise QuiltException( f"The 'registry' argument expects an S3 bucket but the S3 object path " f"{registry!r} was provided instead. You probably wanted to set " f"'registry' to {'s3://' + registry_parsed.bucket!r} instead. To specify that package " f"data land in a specific directory use 'dest'." ) else: raise QuiltException( f"Can only 'push' to remote registries in S3, but {registry!r} " f"is a local file. To store a package in the local registry, use " f"'build' instead." ) if dest is None: dest_parsed = registry_parsed.join(name) else: dest_parsed = PhysicalKey.from_url(fix_url(dest)) if dest_parsed.bucket != registry_parsed.bucket: raise QuiltException( f"Invalid package destination path {dest!r}. 'dest', if set, must be a path " f"in the {registry!r} package registry specified by 'registry'." ) self._fix_sha256() pkg = self.__class__() pkg._meta = self._meta # Since all that is modified is physical keys, pkg will have the same top hash file_list = [] entries = [] for logical_key, entry in self.walk(): if not selector_fn(logical_key, entry): pkg._set(logical_key, entry) continue # Copy the datafiles in the package. physical_key = entry.physical_key new_physical_key = dest_parsed.join(logical_key) if (physical_key.bucket == new_physical_key.bucket and physical_key.path == new_physical_key.path): # No need to copy - re-use the original physical key. pkg._set(logical_key, entry) else: entries.append((logical_key, entry)) file_list.append((physical_key, new_physical_key, entry.size)) results = copy_file_list(file_list, message="Copying objects") for (logical_key, entry), versioned_key in zip(entries, results): # Create a new package entry pointing to the new remote key. 
assert versioned_key is not None new_entry = entry.with_physical_key(versioned_key) pkg._set(logical_key, new_entry) def physical_key_is_temp_file(pk): if not pk.is_local(): return False return pathlib.Path(pk.path).parent == APP_DIR_TEMPFILE_DIR temp_file_logical_keys = [lk for lk, entry in self.walk() if physical_key_is_temp_file(entry.physical_key)] temp_file_physical_keys = [self[lk].physical_key for lk in temp_file_logical_keys] # Now that data has been pushed, delete tmp files created by pkg.set('KEY', obj) with Pool(10) as p: p.map(_delete_local_physical_key, temp_file_physical_keys) # Update old package to point to the materialized location of the file since the tempfile no longer exists for lk in temp_file_logical_keys: self._set(lk, pkg[lk]) pkg._build(name, registry=registry, message=message) shorthash = Package._shorten_tophash(name, PhysicalKey.from_url(registry), pkg.top_hash) print(f"Package {name}@{shorthash} pushed to s3://{dest_parsed.bucket}") if user_is_configured_to_custom_stack(): navigator_url = get_from_config("navigator_url") print(f"Visit {catalog_package_url(navigator_url, dest_parsed.bucket, name)}") else: dest_s3_url = str(dest_parsed) if not dest_s3_url.endswith("/"): dest_s3_url += "/" print(f"Run `quilt3 catalog {dest_s3_url}` to browse.") return pkg @classmethod def rollback(cls, name, registry, top_hash): """ Set the "latest" version to the given hash. Args: name(str): Name of package to rollback. registry(str): Registry where package is located. top_hash(str): Hash to rollback to. """ registry = PhysicalKey.from_url(fix_url(registry)) validate_package_name(name) top_hash = cls.resolve_hash(registry, top_hash) hash_path = registry.join(f'.quilt/packages/{top_hash}') latest_path = registry.join(f'.quilt/named_packages/{name}/latest') # Check that both latest and top_hash actually exist. get_size_and_version(hash_path) get_size_and_version(latest_path) put_bytes(top_hash.encode('utf-8'), latest_path) @ApiTelemetry("package.diff") def diff(self, other_pkg): """ Returns three lists -- added, modified, deleted. Added: present in other_pkg but not in self. Modified: present in both, but different. Deleted: present in self, but not other_pkg. Args: other_pkg: Package to diff Returns: added, modified, deleted (all lists of logical keys) """ deleted = [] modified = [] other_entries = dict(other_pkg.walk()) for lk, entry in self.walk(): other_entry = other_entries.pop(lk, None) if other_entry is None: deleted.append(lk) elif entry != other_entry: modified.append(lk) added = list(sorted(other_entries)) return added, modified, deleted @ApiTelemetry("package.map") def map(self, f, include_directories=False): """ Performs a user-specified operation on each entry in the package. Args: f(x, y): function The function to be applied to each package entry. It should take two inputs, a logical key and a PackageEntry. include_directories: bool Whether or not to include directory entries in the map. Returns: list The list of results generated by the map. """ return self._map(f, include_directories=include_directories) def _map(self, f, include_directories=False): if include_directories: for lk, _ in self._walk_dir_meta(): yield f(lk, self[lk.rstrip("/")]) for lk, entity in self.walk(): yield f(lk, entity) @ApiTelemetry("package.filter") def filter(self, f, include_directories=False): """ Applies a user-specified operation to each entry in the package, removing results that evaluate to False from the output.
Args: f(x, y): function The function to be applied to each package entry. It should take two inputs, a logical key and a PackageEntry. This function should return a boolean. include_directories: bool Whether or not to include directory entries in the map. Returns: A new package with entries that evaluated to False removed """ return self._filter(f=f, include_directories=include_directories) def _filter(self, f, include_directories=False): p = Package() excluded_dirs = set() if include_directories: for lk, _ in self._walk_dir_meta(): if not f(lk, self[lk.rstrip("/")]): excluded_dirs.add(lk) for lk, entity in self.walk(): if (not any(p in excluded_dirs for p in pathlib.PurePosixPath(lk).parents) and f(lk, entity)): p._set(lk, entity) return p def verify(self, src, extra_files_ok=False): """ Check if the contents of the given directory matches the package manifest. Args: src(str): URL of the directory extra_files_ok(bool): Whether extra files in the directory should cause a failure. Returns: True if the package matches the directory; False otherwise. """ src = PhysicalKey.from_url(fix_url(src)) src_dict = dict(list_url(src)) url_list = [] size_list = [] for logical_key, entry in self.walk(): src_size = src_dict.pop(logical_key, None) if src_size is None: return False if entry.size != src_size: return False entry_url = src.join(logical_key) url_list.append(entry_url) size_list.append(src_size) if src_dict and not extra_files_ok: return False hash_list = calculate_sha256(url_list, size_list) for (logical_key, entry), url_hash in zip(self.walk(), hash_list): if entry.hash['value'] != url_hash: return False return True
1
18,409
Why not just import this from `data_transfer`? As a rule copying the same code twice is not a good idea. Also: please run `pylint` on all files in this PR.
quiltdata-quilt
py
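The patch in this quilt record derives a use_tqdm flag inside package.py, and the reviewer's point is that the same logic already lives elsewhere and should be imported rather than copied. A minimal sketch of that direction, assuming (hypothetically) that data_transfer owns the flag; note also that the patch's os.getenv('QUILT_USE_TQDM').lower() raises AttributeError whenever the variable is unset, since os.getenv returns None, so the sketch supplies a default:

import os

# Define the flag once in the shared module (assumed here to be
# data_transfer; the exact home of the flag is illustrative, not
# quilt's actual API). The '' default keeps this safe when
# QUILT_USE_TQDM is unset.
USE_TQDM = os.getenv('QUILT_USE_TQDM', '').lower() == 'true'

# package.py would then reuse it instead of re-deriving it:
# from .data_transfer import USE_TQDM
print(USE_TQDM)  # False unless QUILT_USE_TQDM=true in the environment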
@@ -99,7 +99,7 @@ module Beaker it "can correctly combine arguments from different sources" do FakeFS.deactivate! args = ["-h", hosts_path, "--debug", "--type", "git", "--install", "PUPPET/1.0,HIERA/hello"] - expect(parser.parse_args(args)).to be === {:hosts_file=>hosts_path, :options_file=>nil, :type=>"git", :provision=>true, :preserve_hosts=>false, :root_keys=>false, :quiet=>false, :xml=>false, :color=>true, :debug=>true, :dry_run=>false, :fail_mode=>nil, :timesync=>false, :repo_proxy=>false, :add_el_extras=>false, :consoleport=>443, :pe_dir=>"/opt/enterprise/dists", :pe_version_file=>"LATEST", :pe_version_file_win=>"LATEST-win", :dot_fog=>"#{home}/.fog", :ec2_yaml=>"config/image_templates/ec2.yaml", :ssh=>{:config=>false, :paranoid=>false, :timeout=>300, :auth_methods=>["publickey"], :port=>22, :forward_agent=>true, :keys=>["#{home}/.ssh/id_rsa"], :user_known_hosts_file=>"#{home}/.ssh/known_hosts"}, :install=>["git://github.com/puppetlabs/puppet.git#1.0", "git://github.com/puppetlabs/hiera.git#hello"], :HOSTS=>{:"pe-ubuntu-lucid"=>{:roles=>["agent", "dashboard", "database", "master"], :vmname=>"pe-ubuntu-lucid", :platform=>"ubuntu-10.04-i386", :snapshot=>"clean-w-keys", :hypervisor=>"fusion"}, :"pe-centos6"=>{:roles=>["agent"], :vmname=>"pe-centos6", :platform=>"el-6-i386", :hypervisor=>"fusion", :snapshot=>"clean-w-keys"}}, :nfs_server=>"none", :puppet_ver=>nil, :facter_ver=>nil, :hiera_ver=>nil, :hiera_puppet_ver=>nil, :helper=>[], :load_path=>[], :tests=>[], :pre_suite=>[], :post_suite=>[], :modules=>[]} + expect(parser.parse_args(args)).to be === {:hosts_file=>"/Users/anode/beaker/spec/beaker/options/data/hosts.cfg", :options_file=>nil, :type=>"git", :provision=>true, :preserve_hosts=>false, :root_keys=>false, :quiet=>false, :xml=>false, :color=>true, :debug=>true, :dry_run=>false, :fail_mode=>nil, :timesync=>false, :repo_proxy=>false, :add_el_extras=>false, :consoleport=>443, :pe_dir=>"/opt/enterprise/dists", :pe_version_file=>"LATEST", :pe_version_file_win=>"LATEST-win", :dot_fog=>"/Users/anode/.fog", :ec2_yaml=>"config/image_templates/ec2.yaml", :ssh=>{:config=>false, :paranoid=>false, :timeout=>300, :auth_methods=>["publickey"], :port=>22, :forward_agent=>true, :keys=>["/Users/anode/.ssh/id_rsa"], :user_known_hosts_file=>"/Users/anode/.ssh/known_hosts"}, :install=>["git://github.com/puppetlabs/puppet.git#1.0", "git://github.com/puppetlabs/hiera.git#hello"], :HOSTS=>{:"pe-ubuntu-lucid"=>{:roles=>["agent", "dashboard", "database", "master"], :vmname=>"pe-ubuntu-lucid", :platform=>"ubuntu-10.04-i386", :snapshot=>"clean-w-keys", :hypervisor=>"fusion"}, :"pe-centos6"=>{:roles=>["agent"], :vmname=>"pe-centos6", :platform=>"el-6-i386", :hypervisor=>"fusion", :snapshot=>"clean-w-keys"}}, :nfs_server=>"none", :helper=>[], :load_path=>[], :tests=>[], :pre_suite=>[], :post_suite=>[], :modules=>[]} end it "ensures that file-mode is one of fast/stop" do
1
require "spec_helper" module Beaker module Options describe Parser do let(:parser) { Parser.new } let(:opts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "opts.txt") } let(:hosts_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "hosts.cfg") } let(:badyaml_path) { File.join(File.expand_path(File.dirname(__FILE__)), "data", "badyaml.cfg") } let(:home) {ENV['HOME']} it "supports usage function" do expect{parser.usage}.to_not raise_error end repo = 'git://github.com/puppetlabs' it "has repo set to #{repo}" do expect(parser.repo).to be === "#{repo}" end #test parse_install_options it "can transform --install PUPPET/3.1 into #{repo}/puppet.git#3.1" do opts = ["PUPPET/3.1"] expect(parser.parse_git_repos(opts)).to be === ["#{repo}/puppet.git#3.1"] end it "can transform --install FACTER/v.1.0 into #{repo}/facter.git#v.1.0" do opts = ["FACTER/v.1.0"] expect(parser.parse_git_repos(opts)).to be === ["#{repo}/facter.git#v.1.0"] end it "can transform --install HIERA/xyz into #{repo}/hiera.git#xyz" do opts = ["HIERA/xyz"] expect(parser.parse_git_repos(opts)).to be === ["#{repo}/hiera.git#xyz"] end it "can transform --install HIERA-PUPPET/path/to/repo into #{repo}/hiera-puppet.git#path/to/repo" do opts = ["HIERA-PUPPET/path/to/repo"] expect(parser.parse_git_repos(opts)).to be === ["#{repo}/hiera-puppet.git#path/to/repo"] end it "can transform --install PUPPET/3.1,FACTER/v.1.0 into #{repo}/puppet.git#3.1,#{repo}/facter.git#v.1.0" do opts = ["PUPPET/3.1", "FACTER/v.1.0"] expect(parser.parse_git_repos(opts)).to be === ["#{repo}/puppet.git#3.1", "#{repo}/facter.git#v.1.0"] end it "can leave --install git://github.com/puppetlabs/puppet.git#my/full/path alone" do opts = ["git://github.com/puppetlabs/puppet.git#my/full/path"] expect(parser.parse_git_repos(opts)).to be === ["git://github.com/puppetlabs/puppet.git#my/full/path"] end #split_arg testing it "can split comma separated list into an array" do arg = "file1,file2,file3" expect(parser.split_arg(arg)).to be === ["file1", "file2", "file3"] end it "can use an existing Array as an acceptable argument" do arg = ["file1", "file2", "file3"] expect(parser.split_arg(arg)).to be === ["file1", "file2", "file3"] end it "can generate an array from a single value" do arg = "i'mjustastring" expect(parser.split_arg(arg)).to be === ["i'mjustastring"] end context 'testing path traversing', :use_fakefs => true do let(:test_dir) { 'tmp/tests' } let(:paths) { create_files(@files) } let(:rb_test) { File.expand_path(test_dir + '/my_ruby_file.rb') } let(:pl_test) { File.expand_path(test_dir + '/my_perl_file.pl') } let(:sh_test) { File.expand_path(test_dir + '/my_shell_file.sh') } let(:rb_other) { File.expand_path(test_dir + '/other/my_other_ruby_file.rb') } it 'only collects ruby files as test files' do @files = [ rb_test, pl_test, sh_test, rb_other ] paths expect(parser.file_list([File.expand_path(test_dir)])).to be === [rb_test, rb_other] end it 'raises an error when no ruby files are found' do @files = [ pl_test, sh_test ] paths expect{parser.file_list([File.expand_path(test_dir)])}.to raise_error(ArgumentError) end it 'raises an error when no paths are specified for searching' do @files = '' expect{parser.file_list('')}.to raise_error(ArgumentError) end end #test yaml file checking it "raises error on improperly formatted yaml file" do FakeFS.deactivate! expect{parser.check_yaml_file(badyaml_path)}.to raise_error(ArgumentError) end it "raises an error when a yaml file is missing" do FakeFS.deactivate! 
expect{parser.check_yaml_file("not a path")}.to raise_error(ArgumentError) end it "can correctly combine arguments from different sources" do FakeFS.deactivate! args = ["-h", hosts_path, "--debug", "--type", "git", "--install", "PUPPET/1.0,HIERA/hello"] expect(parser.parse_args(args)).to be === {:hosts_file=>hosts_path, :options_file=>nil, :type=>"git", :provision=>true, :preserve_hosts=>false, :root_keys=>false, :quiet=>false, :xml=>false, :color=>true, :debug=>true, :dry_run=>false, :fail_mode=>nil, :timesync=>false, :repo_proxy=>false, :add_el_extras=>false, :consoleport=>443, :pe_dir=>"/opt/enterprise/dists", :pe_version_file=>"LATEST", :pe_version_file_win=>"LATEST-win", :dot_fog=>"#{home}/.fog", :ec2_yaml=>"config/image_templates/ec2.yaml", :ssh=>{:config=>false, :paranoid=>false, :timeout=>300, :auth_methods=>["publickey"], :port=>22, :forward_agent=>true, :keys=>["#{home}/.ssh/id_rsa"], :user_known_hosts_file=>"#{home}/.ssh/known_hosts"}, :install=>["git://github.com/puppetlabs/puppet.git#1.0", "git://github.com/puppetlabs/hiera.git#hello"], :HOSTS=>{:"pe-ubuntu-lucid"=>{:roles=>["agent", "dashboard", "database", "master"], :vmname=>"pe-ubuntu-lucid", :platform=>"ubuntu-10.04-i386", :snapshot=>"clean-w-keys", :hypervisor=>"fusion"}, :"pe-centos6"=>{:roles=>["agent"], :vmname=>"pe-centos6", :platform=>"el-6-i386", :hypervisor=>"fusion", :snapshot=>"clean-w-keys"}}, :nfs_server=>"none", :puppet_ver=>nil, :facter_ver=>nil, :hiera_ver=>nil, :hiera_puppet_ver=>nil, :helper=>[], :load_path=>[], :tests=>[], :pre_suite=>[], :post_suite=>[], :modules=>[]} end it "ensures that file-mode is one of fast/stop" do FakeFS.deactivate! args = ["-h", hosts_path, "--debug", "--fail-mode", "slow"] expect{parser.parse_args(args)}.to raise_error(ArgumentError) end it "ensures that type is one of pe/git" do FakeFS.deactivate! args = ["-h", hosts_path, "--debug", "--type", "unkowns"] expect{parser.parse_args(args)}.to raise_error(ArgumentError) end end end end
1
4,595
You've got an absolute path here referencing your home dir :-).
voxpupuli-beaker
rb
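The beaker record's diff swaps the environment-derived expectations (hosts_path, "#{home}/.fog", "#{home}/.ssh/id_rsa") for literal /Users/anode paths, which is exactly what the reviewer flags: the assertion then only passes on the author's machine. The spec's own let(:home) {ENV['HOME']} shows the portable form. The same principle sketched in Python for comparison (an illustration, not beaker code):

import os

# Portable: derived from whatever user runs the test.
fog_path = os.path.join(os.path.expanduser("~"), ".fog")

# Non-portable: only correct on one developer's machine.
hardcoded = "/Users/anode/.fog"

print(fog_path)               # e.g. /home/ci/.fog on a CI runner
print(fog_path == hardcoded)  # False for everyone except that one user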
@@ -526,10 +526,10 @@ public final class JavaParserMetaModel { unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.expressionPropertyMetaModel); unaryExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "operator", com.github.javaparser.ast.expr.UnaryExpr.Operator.class, Optional.empty(), false, false, false, false, false); unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.operatorPropertyMetaModel); - unaryExprMetaModel.prefixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "prefix", boolean.class, Optional.empty(), false, true, false, false, false); - unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.prefixPropertyMetaModel); unaryExprMetaModel.postfixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "postfix", boolean.class, Optional.empty(), false, true, false, false, false); unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.postfixPropertyMetaModel); + unaryExprMetaModel.prefixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "prefix", boolean.class, Optional.empty(), false, true, false, false, false); + unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.prefixPropertyMetaModel); variableDeclarationExprMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false); variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.annotationsPropertyMetaModel); variableDeclarationExprMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
1
package com.github.javaparser.metamodel; import com.github.javaparser.ast.*; import java.util.ArrayList; import java.util.List; import java.util.Optional; /** * The model contains meta-data about all nodes in the AST. */ public final class JavaParserMetaModel { private JavaParserMetaModel() { } private static final List<BaseNodeMetaModel> nodeMetaModels = new ArrayList<>(); private static void initializeConstructorParameters() { stringLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel); arrayCreationLevelMetaModel.getConstructorParameters().add(arrayCreationLevelMetaModel.dimensionPropertyMetaModel); arrayCreationLevelMetaModel.getConstructorParameters().add(arrayCreationLevelMetaModel.annotationsPropertyMetaModel); compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.packageDeclarationPropertyMetaModel); compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.importsPropertyMetaModel); compilationUnitMetaModel.getConstructorParameters().add(compilationUnitMetaModel.typesPropertyMetaModel); packageDeclarationMetaModel.getConstructorParameters().add(packageDeclarationMetaModel.annotationsPropertyMetaModel); packageDeclarationMetaModel.getConstructorParameters().add(packageDeclarationMetaModel.namePropertyMetaModel); annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel); annotationDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel); annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel); annotationDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel); annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel); annotationMemberDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel); annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.typePropertyMetaModel); annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.namePropertyMetaModel); annotationMemberDeclarationMetaModel.getConstructorParameters().add(annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel); classOrInterfaceDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel); 
        constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.namePropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
        constructorDeclarationMetaModel.getConstructorParameters().add(constructorDeclarationMetaModel.bodyPropertyMetaModel);
        enumConstantDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.namePropertyMetaModel);
        enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.argumentsPropertyMetaModel);
        enumConstantDeclarationMetaModel.getConstructorParameters().add(enumConstantDeclarationMetaModel.classBodyPropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.namePropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(enumDeclarationMetaModel.implementedTypesPropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(enumDeclarationMetaModel.entriesPropertyMetaModel);
        enumDeclarationMetaModel.getConstructorParameters().add(typeDeclarationMetaModel.membersPropertyMetaModel);
        fieldDeclarationMetaModel.getConstructorParameters().add(fieldDeclarationMetaModel.modifiersPropertyMetaModel);
        fieldDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        fieldDeclarationMetaModel.getConstructorParameters().add(fieldDeclarationMetaModel.variablesPropertyMetaModel);
        initializerDeclarationMetaModel.getConstructorParameters().add(initializerDeclarationMetaModel.isStaticPropertyMetaModel);
        initializerDeclarationMetaModel.getConstructorParameters().add(initializerDeclarationMetaModel.bodyPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(methodDeclarationMetaModel.typePropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.namePropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(methodDeclarationMetaModel.isDefaultPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
        methodDeclarationMetaModel.getConstructorParameters().add(methodDeclarationMetaModel.bodyPropertyMetaModel);
        parameterMetaModel.getConstructorParameters().add(parameterMetaModel.modifiersPropertyMetaModel);
        parameterMetaModel.getConstructorParameters().add(parameterMetaModel.annotationsPropertyMetaModel);
        parameterMetaModel.getConstructorParameters().add(parameterMetaModel.typePropertyMetaModel);
        parameterMetaModel.getConstructorParameters().add(parameterMetaModel.isVarArgsPropertyMetaModel);
        parameterMetaModel.getConstructorParameters().add(parameterMetaModel.namePropertyMetaModel);
        variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.typePropertyMetaModel);
        variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.namePropertyMetaModel);
        variableDeclaratorMetaModel.getConstructorParameters().add(variableDeclaratorMetaModel.initializerPropertyMetaModel);
        blockCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
        javadocCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
        lineCommentMetaModel.getConstructorParameters().add(commentMetaModel.contentPropertyMetaModel);
        arrayAccessExprMetaModel.getConstructorParameters().add(arrayAccessExprMetaModel.namePropertyMetaModel);
        arrayAccessExprMetaModel.getConstructorParameters().add(arrayAccessExprMetaModel.indexPropertyMetaModel);
        arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.elementTypePropertyMetaModel);
        arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.levelsPropertyMetaModel);
        arrayCreationExprMetaModel.getConstructorParameters().add(arrayCreationExprMetaModel.initializerPropertyMetaModel);
        arrayInitializerExprMetaModel.getConstructorParameters().add(arrayInitializerExprMetaModel.valuesPropertyMetaModel);
        assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.targetPropertyMetaModel);
        assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.valuePropertyMetaModel);
        assignExprMetaModel.getConstructorParameters().add(assignExprMetaModel.operatorPropertyMetaModel);
        binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.leftPropertyMetaModel);
        binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.rightPropertyMetaModel);
        binaryExprMetaModel.getConstructorParameters().add(binaryExprMetaModel.operatorPropertyMetaModel);
        booleanLiteralExprMetaModel.getConstructorParameters().add(booleanLiteralExprMetaModel.valuePropertyMetaModel);
        castExprMetaModel.getConstructorParameters().add(castExprMetaModel.typePropertyMetaModel);
        castExprMetaModel.getConstructorParameters().add(castExprMetaModel.expressionPropertyMetaModel);
        charLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
        classExprMetaModel.getConstructorParameters().add(classExprMetaModel.typePropertyMetaModel);
        conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.conditionPropertyMetaModel);
        conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.thenExprPropertyMetaModel);
        conditionalExprMetaModel.getConstructorParameters().add(conditionalExprMetaModel.elseExprPropertyMetaModel);
        doubleLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
        enclosedExprMetaModel.getConstructorParameters().add(enclosedExprMetaModel.innerPropertyMetaModel);
        fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.scopePropertyMetaModel);
        fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel);
        fieldAccessExprMetaModel.getConstructorParameters().add(fieldAccessExprMetaModel.namePropertyMetaModel);
        instanceOfExprMetaModel.getConstructorParameters().add(instanceOfExprMetaModel.expressionPropertyMetaModel);
        instanceOfExprMetaModel.getConstructorParameters().add(instanceOfExprMetaModel.typePropertyMetaModel);
        integerLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
        lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.parametersPropertyMetaModel);
        lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.bodyPropertyMetaModel);
        lambdaExprMetaModel.getConstructorParameters().add(lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel);
        longLiteralExprMetaModel.getConstructorParameters().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
        markerAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
        memberValuePairMetaModel.getConstructorParameters().add(memberValuePairMetaModel.namePropertyMetaModel);
        memberValuePairMetaModel.getConstructorParameters().add(memberValuePairMetaModel.valuePropertyMetaModel);
        methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.scopePropertyMetaModel);
        methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.typeArgumentsPropertyMetaModel);
        methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.namePropertyMetaModel);
        methodCallExprMetaModel.getConstructorParameters().add(methodCallExprMetaModel.argumentsPropertyMetaModel);
        methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.scopePropertyMetaModel);
        methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel);
        methodReferenceExprMetaModel.getConstructorParameters().add(methodReferenceExprMetaModel.identifierPropertyMetaModel);
        nameExprMetaModel.getConstructorParameters().add(nameExprMetaModel.namePropertyMetaModel);
        nameMetaModel.getConstructorParameters().add(nameMetaModel.qualifierPropertyMetaModel);
        nameMetaModel.getConstructorParameters().add(nameMetaModel.identifierPropertyMetaModel);
        normalAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
        normalAnnotationExprMetaModel.getConstructorParameters().add(normalAnnotationExprMetaModel.pairsPropertyMetaModel);
        objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.scopePropertyMetaModel);
        objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.typePropertyMetaModel);
        objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.typeArgumentsPropertyMetaModel);
        objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.argumentsPropertyMetaModel);
        objectCreationExprMetaModel.getConstructorParameters().add(objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel);
        simpleNameMetaModel.getConstructorParameters().add(simpleNameMetaModel.identifierPropertyMetaModel);
        singleMemberAnnotationExprMetaModel.getConstructorParameters().add(annotationExprMetaModel.namePropertyMetaModel);
        singleMemberAnnotationExprMetaModel.getConstructorParameters().add(singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel);
        superExprMetaModel.getConstructorParameters().add(superExprMetaModel.classExprPropertyMetaModel);
        thisExprMetaModel.getConstructorParameters().add(thisExprMetaModel.classExprPropertyMetaModel);
        typeExprMetaModel.getConstructorParameters().add(typeExprMetaModel.typePropertyMetaModel);
        unaryExprMetaModel.getConstructorParameters().add(unaryExprMetaModel.expressionPropertyMetaModel);
        unaryExprMetaModel.getConstructorParameters().add(unaryExprMetaModel.operatorPropertyMetaModel);
        variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.modifiersPropertyMetaModel);
        variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.annotationsPropertyMetaModel);
        variableDeclarationExprMetaModel.getConstructorParameters().add(variableDeclarationExprMetaModel.variablesPropertyMetaModel);
        importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.namePropertyMetaModel);
        importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.isStaticPropertyMetaModel);
        importDeclarationMetaModel.getConstructorParameters().add(importDeclarationMetaModel.isAsteriskPropertyMetaModel);
        assertStmtMetaModel.getConstructorParameters().add(assertStmtMetaModel.checkPropertyMetaModel);
        assertStmtMetaModel.getConstructorParameters().add(assertStmtMetaModel.messagePropertyMetaModel);
        blockStmtMetaModel.getConstructorParameters().add(blockStmtMetaModel.statementsPropertyMetaModel);
        breakStmtMetaModel.getConstructorParameters().add(breakStmtMetaModel.labelPropertyMetaModel);
        catchClauseMetaModel.getConstructorParameters().add(catchClauseMetaModel.parameterPropertyMetaModel);
        catchClauseMetaModel.getConstructorParameters().add(catchClauseMetaModel.bodyPropertyMetaModel);
        continueStmtMetaModel.getConstructorParameters().add(continueStmtMetaModel.labelPropertyMetaModel);
        doStmtMetaModel.getConstructorParameters().add(doStmtMetaModel.bodyPropertyMetaModel);
        doStmtMetaModel.getConstructorParameters().add(doStmtMetaModel.conditionPropertyMetaModel);
        explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel);
        explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel);
        explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel);
        explicitConstructorInvocationStmtMetaModel.getConstructorParameters().add(explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel);
        expressionStmtMetaModel.getConstructorParameters().add(expressionStmtMetaModel.expressionPropertyMetaModel);
        foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.variablePropertyMetaModel);
        foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.iterablePropertyMetaModel);
        foreachStmtMetaModel.getConstructorParameters().add(foreachStmtMetaModel.bodyPropertyMetaModel);
        forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.initializationPropertyMetaModel);
        forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.comparePropertyMetaModel);
        forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.updatePropertyMetaModel);
        forStmtMetaModel.getConstructorParameters().add(forStmtMetaModel.bodyPropertyMetaModel);
        ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.conditionPropertyMetaModel);
        ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.thenStmtPropertyMetaModel);
        ifStmtMetaModel.getConstructorParameters().add(ifStmtMetaModel.elseStmtPropertyMetaModel);
        labeledStmtMetaModel.getConstructorParameters().add(labeledStmtMetaModel.labelPropertyMetaModel);
        labeledStmtMetaModel.getConstructorParameters().add(labeledStmtMetaModel.statementPropertyMetaModel);
        returnStmtMetaModel.getConstructorParameters().add(returnStmtMetaModel.expressionPropertyMetaModel);
        switchEntryStmtMetaModel.getConstructorParameters().add(switchEntryStmtMetaModel.labelPropertyMetaModel);
        switchEntryStmtMetaModel.getConstructorParameters().add(switchEntryStmtMetaModel.statementsPropertyMetaModel);
        switchStmtMetaModel.getConstructorParameters().add(switchStmtMetaModel.selectorPropertyMetaModel);
        switchStmtMetaModel.getConstructorParameters().add(switchStmtMetaModel.entriesPropertyMetaModel);
        synchronizedStmtMetaModel.getConstructorParameters().add(synchronizedStmtMetaModel.expressionPropertyMetaModel);
        synchronizedStmtMetaModel.getConstructorParameters().add(synchronizedStmtMetaModel.bodyPropertyMetaModel);
        throwStmtMetaModel.getConstructorParameters().add(throwStmtMetaModel.expressionPropertyMetaModel);
        tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.resourcesPropertyMetaModel);
        tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.tryBlockPropertyMetaModel);
        tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.catchClausesPropertyMetaModel);
        tryStmtMetaModel.getConstructorParameters().add(tryStmtMetaModel.finallyBlockPropertyMetaModel);
        localClassDeclarationStmtMetaModel.getConstructorParameters().add(localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel);
        whileStmtMetaModel.getConstructorParameters().add(whileStmtMetaModel.conditionPropertyMetaModel);
        whileStmtMetaModel.getConstructorParameters().add(whileStmtMetaModel.bodyPropertyMetaModel);
        arrayTypeMetaModel.getConstructorParameters().add(arrayTypeMetaModel.componentTypePropertyMetaModel);
        arrayTypeMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
        classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.scopePropertyMetaModel);
        classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.namePropertyMetaModel);
        classOrInterfaceTypeMetaModel.getConstructorParameters().add(classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel);
        intersectionTypeMetaModel.getConstructorParameters().add(intersectionTypeMetaModel.elementsPropertyMetaModel);
        primitiveTypeMetaModel.getConstructorParameters().add(primitiveTypeMetaModel.typePropertyMetaModel);
        typeParameterMetaModel.getConstructorParameters().add(typeParameterMetaModel.namePropertyMetaModel);
        typeParameterMetaModel.getConstructorParameters().add(typeParameterMetaModel.typeBoundPropertyMetaModel);
        typeParameterMetaModel.getConstructorParameters().add(typeMetaModel.annotationsPropertyMetaModel);
        unionTypeMetaModel.getConstructorParameters().add(unionTypeMetaModel.elementsPropertyMetaModel);
        wildcardTypeMetaModel.getConstructorParameters().add(wildcardTypeMetaModel.extendedTypePropertyMetaModel);
        wildcardTypeMetaModel.getConstructorParameters().add(wildcardTypeMetaModel.superTypePropertyMetaModel);
    }

    public static List<BaseNodeMetaModel> getNodeMetaModels() {
        return nodeMetaModels;
    }

    private static void initializeNodeMetaModels() {
        nodeMetaModels.add(annotationDeclarationMetaModel);
        nodeMetaModels.add(annotationExprMetaModel);
        nodeMetaModels.add(annotationMemberDeclarationMetaModel);
        nodeMetaModels.add(arrayAccessExprMetaModel);
        nodeMetaModels.add(arrayCreationExprMetaModel);
        nodeMetaModels.add(arrayCreationLevelMetaModel);
        nodeMetaModels.add(arrayInitializerExprMetaModel);
        nodeMetaModels.add(arrayTypeMetaModel);
        nodeMetaModels.add(assertStmtMetaModel);
        nodeMetaModels.add(assignExprMetaModel);
        nodeMetaModels.add(binaryExprMetaModel);
        nodeMetaModels.add(blockCommentMetaModel);
        nodeMetaModels.add(blockStmtMetaModel);
        nodeMetaModels.add(bodyDeclarationMetaModel);
        nodeMetaModels.add(booleanLiteralExprMetaModel);
        nodeMetaModels.add(breakStmtMetaModel);
        nodeMetaModels.add(callableDeclarationMetaModel);
        nodeMetaModels.add(castExprMetaModel);
        nodeMetaModels.add(catchClauseMetaModel);
        nodeMetaModels.add(charLiteralExprMetaModel);
        nodeMetaModels.add(classExprMetaModel);
        nodeMetaModels.add(classOrInterfaceDeclarationMetaModel);
        nodeMetaModels.add(classOrInterfaceTypeMetaModel);
        nodeMetaModels.add(commentMetaModel);
        nodeMetaModels.add(compilationUnitMetaModel);
        nodeMetaModels.add(conditionalExprMetaModel);
        nodeMetaModels.add(constructorDeclarationMetaModel);
        nodeMetaModels.add(continueStmtMetaModel);
        nodeMetaModels.add(doStmtMetaModel);
        nodeMetaModels.add(doubleLiteralExprMetaModel);
        nodeMetaModels.add(emptyMemberDeclarationMetaModel);
        nodeMetaModels.add(emptyStmtMetaModel);
        nodeMetaModels.add(enclosedExprMetaModel);
        nodeMetaModels.add(enumConstantDeclarationMetaModel);
        nodeMetaModels.add(enumDeclarationMetaModel);
        nodeMetaModels.add(explicitConstructorInvocationStmtMetaModel);
        nodeMetaModels.add(expressionMetaModel);
        nodeMetaModels.add(expressionStmtMetaModel);
        nodeMetaModels.add(fieldAccessExprMetaModel);
        nodeMetaModels.add(fieldDeclarationMetaModel);
        nodeMetaModels.add(forStmtMetaModel);
        nodeMetaModels.add(foreachStmtMetaModel);
        nodeMetaModels.add(ifStmtMetaModel);
        nodeMetaModels.add(importDeclarationMetaModel);
        nodeMetaModels.add(initializerDeclarationMetaModel);
        nodeMetaModels.add(instanceOfExprMetaModel);
        nodeMetaModels.add(integerLiteralExprMetaModel);
        nodeMetaModels.add(intersectionTypeMetaModel);
        nodeMetaModels.add(javadocCommentMetaModel);
        nodeMetaModels.add(labeledStmtMetaModel);
        nodeMetaModels.add(lambdaExprMetaModel);
        nodeMetaModels.add(lineCommentMetaModel);
        nodeMetaModels.add(literalExprMetaModel);
        nodeMetaModels.add(literalStringValueExprMetaModel);
        nodeMetaModels.add(localClassDeclarationStmtMetaModel);
        nodeMetaModels.add(longLiteralExprMetaModel);
        nodeMetaModels.add(markerAnnotationExprMetaModel);
        nodeMetaModels.add(memberValuePairMetaModel);
        nodeMetaModels.add(methodCallExprMetaModel);
        nodeMetaModels.add(methodDeclarationMetaModel);
        nodeMetaModels.add(methodReferenceExprMetaModel);
        nodeMetaModels.add(nameExprMetaModel);
        nodeMetaModels.add(nameMetaModel);
        nodeMetaModels.add(nodeMetaModel);
        nodeMetaModels.add(normalAnnotationExprMetaModel);
        nodeMetaModels.add(nullLiteralExprMetaModel);
        nodeMetaModels.add(objectCreationExprMetaModel);
        nodeMetaModels.add(packageDeclarationMetaModel);
        nodeMetaModels.add(parameterMetaModel);
        nodeMetaModels.add(primitiveTypeMetaModel);
        nodeMetaModels.add(referenceTypeMetaModel);
        nodeMetaModels.add(returnStmtMetaModel);
        nodeMetaModels.add(simpleNameMetaModel);
        nodeMetaModels.add(singleMemberAnnotationExprMetaModel);
        nodeMetaModels.add(statementMetaModel);
        nodeMetaModels.add(stringLiteralExprMetaModel);
        nodeMetaModels.add(superExprMetaModel);
        nodeMetaModels.add(switchEntryStmtMetaModel);
        nodeMetaModels.add(switchStmtMetaModel);
        nodeMetaModels.add(synchronizedStmtMetaModel);
        nodeMetaModels.add(thisExprMetaModel);
        nodeMetaModels.add(throwStmtMetaModel);
        nodeMetaModels.add(tryStmtMetaModel);
        nodeMetaModels.add(typeDeclarationMetaModel);
        nodeMetaModels.add(typeExprMetaModel);
        nodeMetaModels.add(typeMetaModel);
        nodeMetaModels.add(typeParameterMetaModel);
        nodeMetaModels.add(unaryExprMetaModel);
        nodeMetaModels.add(unionTypeMetaModel);
        nodeMetaModels.add(unknownTypeMetaModel);
        nodeMetaModels.add(variableDeclarationExprMetaModel);
        nodeMetaModels.add(variableDeclaratorMetaModel);
        nodeMetaModels.add(voidTypeMetaModel);
        nodeMetaModels.add(whileStmtMetaModel);
        nodeMetaModels.add(wildcardTypeMetaModel);
    }

    private static void initializePropertyMetaModels() {
        nodeMetaModel.commentPropertyMetaModel = new PropertyMetaModel(nodeMetaModel, "comment", com.github.javaparser.ast.comments.Comment.class, Optional.of(commentMetaModel), true, false, false, false, false);
        nodeMetaModel.getDeclaredPropertyMetaModels().add(nodeMetaModel.commentPropertyMetaModel);
        bodyDeclarationMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(bodyDeclarationMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
        bodyDeclarationMetaModel.getDeclaredPropertyMetaModels().add(bodyDeclarationMetaModel.annotationsPropertyMetaModel);
        callableDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
        callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.modifiersPropertyMetaModel);
        callableDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.namePropertyMetaModel);
        callableDeclarationMetaModel.parametersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "parameters", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, true, false, false);
        callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.parametersPropertyMetaModel);
        callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "thrownExceptions", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, false, true, false, false);
        callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.thrownExceptionsPropertyMetaModel);
        callableDeclarationMetaModel.typeParametersPropertyMetaModel = new PropertyMetaModel(callableDeclarationMetaModel, "typeParameters", com.github.javaparser.ast.type.TypeParameter.class, Optional.of(typeParameterMetaModel), false, false, true, false, false);
        callableDeclarationMetaModel.getDeclaredPropertyMetaModels().add(callableDeclarationMetaModel.typeParametersPropertyMetaModel);
        typeMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(typeMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
        typeMetaModel.getDeclaredPropertyMetaModels().add(typeMetaModel.annotationsPropertyMetaModel);
        annotationExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(annotationExprMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
        annotationExprMetaModel.getDeclaredPropertyMetaModels().add(annotationExprMetaModel.namePropertyMetaModel);
        typeDeclarationMetaModel.membersPropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "members", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), false, false, true, false, true);
        typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.membersPropertyMetaModel);
        typeDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
        typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.modifiersPropertyMetaModel);
        typeDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(typeDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        typeDeclarationMetaModel.getDeclaredPropertyMetaModels().add(typeDeclarationMetaModel.namePropertyMetaModel);
        literalStringValueExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(literalStringValueExprMetaModel, "value", java.lang.String.class, Optional.empty(), false, false, false, false, false);
        literalStringValueExprMetaModel.getDeclaredPropertyMetaModels().add(literalStringValueExprMetaModel.valuePropertyMetaModel);
        arrayCreationLevelMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(arrayCreationLevelMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
        arrayCreationLevelMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationLevelMetaModel.annotationsPropertyMetaModel);
        arrayCreationLevelMetaModel.dimensionPropertyMetaModel = new PropertyMetaModel(arrayCreationLevelMetaModel, "dimension", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        arrayCreationLevelMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationLevelMetaModel.dimensionPropertyMetaModel);
        compilationUnitMetaModel.importsPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "imports", com.github.javaparser.ast.ImportDeclaration.class, Optional.of(importDeclarationMetaModel), false, false, true, false, false);
        compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.importsPropertyMetaModel);
        compilationUnitMetaModel.packageDeclarationPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "packageDeclaration", com.github.javaparser.ast.PackageDeclaration.class, Optional.of(packageDeclarationMetaModel), true, false, false, false, false);
        compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.packageDeclarationPropertyMetaModel);
        compilationUnitMetaModel.typesPropertyMetaModel = new PropertyMetaModel(compilationUnitMetaModel, "types", com.github.javaparser.ast.body.TypeDeclaration.class, Optional.of(typeDeclarationMetaModel), false, false, true, false, true);
        compilationUnitMetaModel.getDeclaredPropertyMetaModels().add(compilationUnitMetaModel.typesPropertyMetaModel);
        packageDeclarationMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(packageDeclarationMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
        packageDeclarationMetaModel.getDeclaredPropertyMetaModels().add(packageDeclarationMetaModel.annotationsPropertyMetaModel);
        packageDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(packageDeclarationMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false);
        packageDeclarationMetaModel.getDeclaredPropertyMetaModels().add(packageDeclarationMetaModel.namePropertyMetaModel);
        annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "defaultValue", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.defaultValuePropertyMetaModel);
        annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
        annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.modifiersPropertyMetaModel);
        annotationMemberDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.namePropertyMetaModel);
        annotationMemberDeclarationMetaModel.typePropertyMetaModel = new PropertyMetaModel(annotationMemberDeclarationMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        annotationMemberDeclarationMetaModel.getDeclaredPropertyMetaModels().add(annotationMemberDeclarationMetaModel.typePropertyMetaModel);
        classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "extendedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
        classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.extendedTypesPropertyMetaModel);
        classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "implementedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
        classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.implementedTypesPropertyMetaModel);
        classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "isInterface", boolean.class, Optional.empty(), false, false, false, false, false);
        classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.isInterfacePropertyMetaModel);
        classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel = new PropertyMetaModel(classOrInterfaceDeclarationMetaModel, "typeParameters", com.github.javaparser.ast.type.TypeParameter.class, Optional.of(typeParameterMetaModel), false, false, true, false, false);
        classOrInterfaceDeclarationMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceDeclarationMetaModel.typeParametersPropertyMetaModel);
        constructorDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(constructorDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
        constructorDeclarationMetaModel.getDeclaredPropertyMetaModels().add(constructorDeclarationMetaModel.bodyPropertyMetaModel);
        enumConstantDeclarationMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
        enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.argumentsPropertyMetaModel);
        enumConstantDeclarationMetaModel.classBodyPropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "classBody", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), false, false, true, false, true);
        enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.classBodyPropertyMetaModel);
        enumConstantDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(enumConstantDeclarationMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        enumConstantDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumConstantDeclarationMetaModel.namePropertyMetaModel);
        enumDeclarationMetaModel.entriesPropertyMetaModel = new PropertyMetaModel(enumDeclarationMetaModel, "entries", com.github.javaparser.ast.body.EnumConstantDeclaration.class, Optional.of(enumConstantDeclarationMetaModel), false, false, true, false, false);
        enumDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumDeclarationMetaModel.entriesPropertyMetaModel);
        enumDeclarationMetaModel.implementedTypesPropertyMetaModel = new PropertyMetaModel(enumDeclarationMetaModel, "implementedTypes", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false);
        enumDeclarationMetaModel.getDeclaredPropertyMetaModels().add(enumDeclarationMetaModel.implementedTypesPropertyMetaModel);
        fieldDeclarationMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
        fieldDeclarationMetaModel.getDeclaredPropertyMetaModels().add(fieldDeclarationMetaModel.modifiersPropertyMetaModel);
        fieldDeclarationMetaModel.variablesPropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "variables", com.github.javaparser.ast.body.VariableDeclarator.class, Optional.of(variableDeclaratorMetaModel), false, true, true, false, false);
        fieldDeclarationMetaModel.getDeclaredPropertyMetaModels().add(fieldDeclarationMetaModel.variablesPropertyMetaModel);
        fieldDeclarationMetaModel.maximumCommonTypePropertyMetaModel = new PropertyMetaModel(fieldDeclarationMetaModel, "maximumCommonType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, true, false, false, false);
        fieldDeclarationMetaModel.getDerivedPropertyMetaModels().add(fieldDeclarationMetaModel.maximumCommonTypePropertyMetaModel);
        initializerDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(initializerDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false);
        initializerDeclarationMetaModel.getDeclaredPropertyMetaModels().add(initializerDeclarationMetaModel.bodyPropertyMetaModel);
        initializerDeclarationMetaModel.isStaticPropertyMetaModel = new PropertyMetaModel(initializerDeclarationMetaModel, "isStatic", boolean.class, Optional.empty(), false, false, false, false, false);
        initializerDeclarationMetaModel.getDeclaredPropertyMetaModels().add(initializerDeclarationMetaModel.isStaticPropertyMetaModel);
        methodDeclarationMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(methodDeclarationMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false);
        methodDeclarationMetaModel.getDeclaredPropertyMetaModels().add(methodDeclarationMetaModel.bodyPropertyMetaModel);
        methodDeclarationMetaModel.isDefaultPropertyMetaModel = new PropertyMetaModel(methodDeclarationMetaModel, "isDefault", boolean.class, Optional.empty(), false, false, false, false, false);
        methodDeclarationMetaModel.getDeclaredPropertyMetaModels().add(methodDeclarationMetaModel.isDefaultPropertyMetaModel);
        methodDeclarationMetaModel.typePropertyMetaModel = new PropertyMetaModel(methodDeclarationMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        methodDeclarationMetaModel.getDeclaredPropertyMetaModels().add(methodDeclarationMetaModel.typePropertyMetaModel);
        parameterMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false);
        parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.annotationsPropertyMetaModel);
        parameterMetaModel.isVarArgsPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "isVarArgs", boolean.class, Optional.empty(), false, false, false, false, false);
        parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.isVarArgsPropertyMetaModel);
        parameterMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false);
        parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.modifiersPropertyMetaModel);
        parameterMetaModel.namePropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.namePropertyMetaModel);
        parameterMetaModel.typePropertyMetaModel = new PropertyMetaModel(parameterMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        parameterMetaModel.getDeclaredPropertyMetaModels().add(parameterMetaModel.typePropertyMetaModel);
        variableDeclaratorMetaModel.initializerPropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "initializer", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, true, false, false, false);
        variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.initializerPropertyMetaModel);
        variableDeclaratorMetaModel.namePropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.namePropertyMetaModel);
        variableDeclaratorMetaModel.typePropertyMetaModel = new PropertyMetaModel(variableDeclaratorMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        variableDeclaratorMetaModel.getDeclaredPropertyMetaModels().add(variableDeclaratorMetaModel.typePropertyMetaModel);
        commentMetaModel.contentPropertyMetaModel = new PropertyMetaModel(commentMetaModel, "content", java.lang.String.class, Optional.empty(), false, false, false, false, false);
        commentMetaModel.getDeclaredPropertyMetaModels().add(commentMetaModel.contentPropertyMetaModel);
        arrayAccessExprMetaModel.indexPropertyMetaModel = new PropertyMetaModel(arrayAccessExprMetaModel, "index", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        arrayAccessExprMetaModel.getDeclaredPropertyMetaModels().add(arrayAccessExprMetaModel.indexPropertyMetaModel);
        arrayAccessExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(arrayAccessExprMetaModel, "name", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        arrayAccessExprMetaModel.getDeclaredPropertyMetaModels().add(arrayAccessExprMetaModel.namePropertyMetaModel);
        arrayCreationExprMetaModel.elementTypePropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "elementType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.elementTypePropertyMetaModel);
        arrayCreationExprMetaModel.initializerPropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "initializer", com.github.javaparser.ast.expr.ArrayInitializerExpr.class, Optional.of(arrayInitializerExprMetaModel), true, false, false, false, false);
        arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.initializerPropertyMetaModel);
        arrayCreationExprMetaModel.levelsPropertyMetaModel = new PropertyMetaModel(arrayCreationExprMetaModel, "levels", com.github.javaparser.ast.ArrayCreationLevel.class, Optional.of(arrayCreationLevelMetaModel), false, true, true, false, false);
        arrayCreationExprMetaModel.getDeclaredPropertyMetaModels().add(arrayCreationExprMetaModel.levelsPropertyMetaModel);
        arrayInitializerExprMetaModel.valuesPropertyMetaModel = new PropertyMetaModel(arrayInitializerExprMetaModel, "values", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
        arrayInitializerExprMetaModel.getDeclaredPropertyMetaModels().add(arrayInitializerExprMetaModel.valuesPropertyMetaModel);
        assignExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "operator", com.github.javaparser.ast.expr.AssignExpr.Operator.class, Optional.empty(), false, false, false, false, false);
        assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.operatorPropertyMetaModel);
        assignExprMetaModel.targetPropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "target", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.targetPropertyMetaModel);
        assignExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(assignExprMetaModel, "value", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        assignExprMetaModel.getDeclaredPropertyMetaModels().add(assignExprMetaModel.valuePropertyMetaModel);
        binaryExprMetaModel.leftPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "left", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.leftPropertyMetaModel);
        binaryExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "operator", com.github.javaparser.ast.expr.BinaryExpr.Operator.class, Optional.empty(), false, false, false, false, false);
        binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.operatorPropertyMetaModel);
        binaryExprMetaModel.rightPropertyMetaModel = new PropertyMetaModel(binaryExprMetaModel, "right", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        binaryExprMetaModel.getDeclaredPropertyMetaModels().add(binaryExprMetaModel.rightPropertyMetaModel);
        booleanLiteralExprMetaModel.valuePropertyMetaModel = new PropertyMetaModel(booleanLiteralExprMetaModel, "value", boolean.class, Optional.empty(), false, false, false, false, false);
        booleanLiteralExprMetaModel.getDeclaredPropertyMetaModels().add(booleanLiteralExprMetaModel.valuePropertyMetaModel);
        castExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(castExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        castExprMetaModel.getDeclaredPropertyMetaModels().add(castExprMetaModel.expressionPropertyMetaModel);
        castExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(castExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        castExprMetaModel.getDeclaredPropertyMetaModels().add(castExprMetaModel.typePropertyMetaModel);
        classExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(classExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        classExprMetaModel.getDeclaredPropertyMetaModels().add(classExprMetaModel.typePropertyMetaModel);
        conditionalExprMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.conditionPropertyMetaModel);
        conditionalExprMetaModel.elseExprPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "elseExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.elseExprPropertyMetaModel);
        conditionalExprMetaModel.thenExprPropertyMetaModel = new PropertyMetaModel(conditionalExprMetaModel, "thenExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        conditionalExprMetaModel.getDeclaredPropertyMetaModels().add(conditionalExprMetaModel.thenExprPropertyMetaModel);
        enclosedExprMetaModel.innerPropertyMetaModel = new PropertyMetaModel(enclosedExprMetaModel, "inner", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        enclosedExprMetaModel.getDeclaredPropertyMetaModels().add(enclosedExprMetaModel.innerPropertyMetaModel);
        fieldAccessExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.namePropertyMetaModel);
        fieldAccessExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.scopePropertyMetaModel);
        fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
        fieldAccessExprMetaModel.getDeclaredPropertyMetaModels().add(fieldAccessExprMetaModel.typeArgumentsPropertyMetaModel);
        fieldAccessExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(fieldAccessExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false);
        fieldAccessExprMetaModel.getDerivedPropertyMetaModels().add(fieldAccessExprMetaModel.usingDiamondOperatorPropertyMetaModel);
        instanceOfExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(instanceOfExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        instanceOfExprMetaModel.getDeclaredPropertyMetaModels().add(instanceOfExprMetaModel.expressionPropertyMetaModel);
        instanceOfExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(instanceOfExprMetaModel, "type", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, false, false, false, true);
        instanceOfExprMetaModel.getDeclaredPropertyMetaModels().add(instanceOfExprMetaModel.typePropertyMetaModel);
        lambdaExprMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false);
        lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.bodyPropertyMetaModel);
        lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "isEnclosingParameters", boolean.class, Optional.empty(), false, false, false, false, false);
        lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.isEnclosingParametersPropertyMetaModel);
        lambdaExprMetaModel.parametersPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "parameters", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, true, false, false);
        lambdaExprMetaModel.getDeclaredPropertyMetaModels().add(lambdaExprMetaModel.parametersPropertyMetaModel);
        lambdaExprMetaModel.expressionBodyPropertyMetaModel = new PropertyMetaModel(lambdaExprMetaModel, "expressionBody", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, true, false, false, false);
        lambdaExprMetaModel.getDerivedPropertyMetaModels().add(lambdaExprMetaModel.expressionBodyPropertyMetaModel);
        memberValuePairMetaModel.namePropertyMetaModel = new PropertyMetaModel(memberValuePairMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        memberValuePairMetaModel.getDeclaredPropertyMetaModels().add(memberValuePairMetaModel.namePropertyMetaModel);
        memberValuePairMetaModel.valuePropertyMetaModel = new PropertyMetaModel(memberValuePairMetaModel, "value", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        memberValuePairMetaModel.getDeclaredPropertyMetaModels().add(memberValuePairMetaModel.valuePropertyMetaModel);
        methodCallExprMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
        methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.argumentsPropertyMetaModel);
        methodCallExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.namePropertyMetaModel);
        methodCallExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.scopePropertyMetaModel);
        methodCallExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
        methodCallExprMetaModel.getDeclaredPropertyMetaModels().add(methodCallExprMetaModel.typeArgumentsPropertyMetaModel);
        methodCallExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(methodCallExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false);
        methodCallExprMetaModel.getDerivedPropertyMetaModels().add(methodCallExprMetaModel.usingDiamondOperatorPropertyMetaModel);
        methodReferenceExprMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
        methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.identifierPropertyMetaModel);
        methodReferenceExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.scopePropertyMetaModel);
        methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
        methodReferenceExprMetaModel.getDeclaredPropertyMetaModels().add(methodReferenceExprMetaModel.typeArgumentsPropertyMetaModel);
        methodReferenceExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(methodReferenceExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false);
        methodReferenceExprMetaModel.getDerivedPropertyMetaModels().add(methodReferenceExprMetaModel.usingDiamondOperatorPropertyMetaModel);
        nameExprMetaModel.namePropertyMetaModel = new PropertyMetaModel(nameExprMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false);
        nameExprMetaModel.getDeclaredPropertyMetaModels().add(nameExprMetaModel.namePropertyMetaModel);
        nameMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(nameMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
        nameMetaModel.getDeclaredPropertyMetaModels().add(nameMetaModel.identifierPropertyMetaModel);
        nameMetaModel.qualifierPropertyMetaModel = new PropertyMetaModel(nameMetaModel, "qualifier", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), true, false, false, false, false);
        nameMetaModel.getDeclaredPropertyMetaModels().add(nameMetaModel.qualifierPropertyMetaModel);
        normalAnnotationExprMetaModel.pairsPropertyMetaModel = new PropertyMetaModel(normalAnnotationExprMetaModel, "pairs", com.github.javaparser.ast.expr.MemberValuePair.class, Optional.of(memberValuePairMetaModel), false, false, true, false, false);
        normalAnnotationExprMetaModel.getDeclaredPropertyMetaModels().add(normalAnnotationExprMetaModel.pairsPropertyMetaModel);
        objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "anonymousClassBody", com.github.javaparser.ast.body.BodyDeclaration.class, Optional.of(bodyDeclarationMetaModel), true, false, true, false, true);
        objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.anonymousClassBodyPropertyMetaModel);
        objectCreationExprMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false);
        objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.argumentsPropertyMetaModel);
        objectCreationExprMetaModel.scopePropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "scope", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.scopePropertyMetaModel);
        objectCreationExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "type", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, false, false, false);
        objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.typePropertyMetaModel);
        objectCreationExprMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false);
        objectCreationExprMetaModel.getDeclaredPropertyMetaModels().add(objectCreationExprMetaModel.typeArgumentsPropertyMetaModel);
        objectCreationExprMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(objectCreationExprMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false);
        objectCreationExprMetaModel.getDerivedPropertyMetaModels().add(objectCreationExprMetaModel.usingDiamondOperatorPropertyMetaModel);
        simpleNameMetaModel.identifierPropertyMetaModel = new PropertyMetaModel(simpleNameMetaModel, "identifier", java.lang.String.class, Optional.empty(), false, true, false, false, false);
        simpleNameMetaModel.getDeclaredPropertyMetaModels().add(simpleNameMetaModel.identifierPropertyMetaModel);
        singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel = new PropertyMetaModel(singleMemberAnnotationExprMetaModel, "memberValue", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        singleMemberAnnotationExprMetaModel.getDeclaredPropertyMetaModels().add(singleMemberAnnotationExprMetaModel.memberValuePropertyMetaModel);
        superExprMetaModel.classExprPropertyMetaModel = new PropertyMetaModel(superExprMetaModel, "classExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        superExprMetaModel.getDeclaredPropertyMetaModels().add(superExprMetaModel.classExprPropertyMetaModel);
        thisExprMetaModel.classExprPropertyMetaModel = new PropertyMetaModel(thisExprMetaModel, "classExpr", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false);
        thisExprMetaModel.getDeclaredPropertyMetaModels().add(thisExprMetaModel.classExprPropertyMetaModel);
        typeExprMetaModel.typePropertyMetaModel = new PropertyMetaModel(typeExprMetaModel, "type", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false);
        typeExprMetaModel.getDeclaredPropertyMetaModels().add(typeExprMetaModel.typePropertyMetaModel);
        unaryExprMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false);
        unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.expressionPropertyMetaModel);
        unaryExprMetaModel.operatorPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "operator", com.github.javaparser.ast.expr.UnaryExpr.Operator.class, Optional.empty(), false, false, false, false, false);
        unaryExprMetaModel.getDeclaredPropertyMetaModels().add(unaryExprMetaModel.operatorPropertyMetaModel);
        unaryExprMetaModel.prefixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "prefix", boolean.class, Optional.empty(), false, true, false, false, false);
        unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.prefixPropertyMetaModel);
        unaryExprMetaModel.postfixPropertyMetaModel = new PropertyMetaModel(unaryExprMetaModel, "postfix", boolean.class, Optional.empty(), false, true, false, false, false);
        unaryExprMetaModel.getDerivedPropertyMetaModels().add(unaryExprMetaModel.postfixPropertyMetaModel);
variableDeclarationExprMetaModel.annotationsPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "annotations", com.github.javaparser.ast.expr.AnnotationExpr.class, Optional.of(annotationExprMetaModel), false, false, true, false, false); variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.annotationsPropertyMetaModel); variableDeclarationExprMetaModel.modifiersPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "modifiers", com.github.javaparser.ast.Modifier.class, Optional.empty(), false, false, false, true, false); variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.modifiersPropertyMetaModel); variableDeclarationExprMetaModel.variablesPropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "variables", com.github.javaparser.ast.body.VariableDeclarator.class, Optional.of(variableDeclaratorMetaModel), false, true, true, false, false); variableDeclarationExprMetaModel.getDeclaredPropertyMetaModels().add(variableDeclarationExprMetaModel.variablesPropertyMetaModel); variableDeclarationExprMetaModel.maximumCommonTypePropertyMetaModel = new PropertyMetaModel(variableDeclarationExprMetaModel, "maximumCommonType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, true, false, false, false); variableDeclarationExprMetaModel.getDerivedPropertyMetaModels().add(variableDeclarationExprMetaModel.maximumCommonTypePropertyMetaModel); importDeclarationMetaModel.isAsteriskPropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "isAsterisk", boolean.class, Optional.empty(), false, false, false, false, false); importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.isAsteriskPropertyMetaModel); importDeclarationMetaModel.isStaticPropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "isStatic", boolean.class, Optional.empty(), false, false, false, false, false); importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.isStaticPropertyMetaModel); importDeclarationMetaModel.namePropertyMetaModel = new PropertyMetaModel(importDeclarationMetaModel, "name", com.github.javaparser.ast.expr.Name.class, Optional.of(nameMetaModel), false, false, false, false, false); importDeclarationMetaModel.getDeclaredPropertyMetaModels().add(importDeclarationMetaModel.namePropertyMetaModel); assertStmtMetaModel.checkPropertyMetaModel = new PropertyMetaModel(assertStmtMetaModel, "check", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); assertStmtMetaModel.getDeclaredPropertyMetaModels().add(assertStmtMetaModel.checkPropertyMetaModel); assertStmtMetaModel.messagePropertyMetaModel = new PropertyMetaModel(assertStmtMetaModel, "message", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false); assertStmtMetaModel.getDeclaredPropertyMetaModels().add(assertStmtMetaModel.messagePropertyMetaModel); blockStmtMetaModel.statementsPropertyMetaModel = new PropertyMetaModel(blockStmtMetaModel, "statements", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, true, false, false); blockStmtMetaModel.getDeclaredPropertyMetaModels().add(blockStmtMetaModel.statementsPropertyMetaModel); breakStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(breakStmtMetaModel, "label", 
com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), true, false, false, false, false); breakStmtMetaModel.getDeclaredPropertyMetaModels().add(breakStmtMetaModel.labelPropertyMetaModel); catchClauseMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(catchClauseMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false); catchClauseMetaModel.getDeclaredPropertyMetaModels().add(catchClauseMetaModel.bodyPropertyMetaModel); catchClauseMetaModel.parameterPropertyMetaModel = new PropertyMetaModel(catchClauseMetaModel, "parameter", com.github.javaparser.ast.body.Parameter.class, Optional.of(parameterMetaModel), false, false, false, false, false); catchClauseMetaModel.getDeclaredPropertyMetaModels().add(catchClauseMetaModel.parameterPropertyMetaModel); continueStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(continueStmtMetaModel, "label", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), true, false, false, false, false); continueStmtMetaModel.getDeclaredPropertyMetaModels().add(continueStmtMetaModel.labelPropertyMetaModel); doStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(doStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); doStmtMetaModel.getDeclaredPropertyMetaModels().add(doStmtMetaModel.bodyPropertyMetaModel); doStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(doStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); doStmtMetaModel.getDeclaredPropertyMetaModels().add(doStmtMetaModel.conditionPropertyMetaModel); explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "arguments", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false); explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.argumentsPropertyMetaModel); explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false); explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.expressionPropertyMetaModel); explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "isThis", boolean.class, Optional.empty(), false, false, false, false, false); explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.isThisPropertyMetaModel); explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false); explicitConstructorInvocationStmtMetaModel.getDeclaredPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.typeArgumentsPropertyMetaModel); explicitConstructorInvocationStmtMetaModel.usingDiamondOperatorPropertyMetaModel = new 
PropertyMetaModel(explicitConstructorInvocationStmtMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false); explicitConstructorInvocationStmtMetaModel.getDerivedPropertyMetaModels().add(explicitConstructorInvocationStmtMetaModel.usingDiamondOperatorPropertyMetaModel); expressionStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(expressionStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); expressionStmtMetaModel.getDeclaredPropertyMetaModels().add(expressionStmtMetaModel.expressionPropertyMetaModel); foreachStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.bodyPropertyMetaModel); foreachStmtMetaModel.iterablePropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "iterable", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.iterablePropertyMetaModel); foreachStmtMetaModel.variablePropertyMetaModel = new PropertyMetaModel(foreachStmtMetaModel, "variable", com.github.javaparser.ast.expr.VariableDeclarationExpr.class, Optional.of(variableDeclarationExprMetaModel), false, false, false, false, false); foreachStmtMetaModel.getDeclaredPropertyMetaModels().add(foreachStmtMetaModel.variablePropertyMetaModel); forStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.bodyPropertyMetaModel); forStmtMetaModel.comparePropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "compare", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false); forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.comparePropertyMetaModel); forStmtMetaModel.initializationPropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "initialization", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false); forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.initializationPropertyMetaModel); forStmtMetaModel.updatePropertyMetaModel = new PropertyMetaModel(forStmtMetaModel, "update", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, true, false, false); forStmtMetaModel.getDeclaredPropertyMetaModels().add(forStmtMetaModel.updatePropertyMetaModel); ifStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.conditionPropertyMetaModel); ifStmtMetaModel.elseStmtPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "elseStmt", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), true, false, false, false, false); ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.elseStmtPropertyMetaModel); 
ifStmtMetaModel.thenStmtPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "thenStmt", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); ifStmtMetaModel.getDeclaredPropertyMetaModels().add(ifStmtMetaModel.thenStmtPropertyMetaModel); ifStmtMetaModel.thenBlockPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "thenBlock", boolean.class, Optional.empty(), false, true, false, false, false); ifStmtMetaModel.getDerivedPropertyMetaModels().add(ifStmtMetaModel.thenBlockPropertyMetaModel); ifStmtMetaModel.elseBlockPropertyMetaModel = new PropertyMetaModel(ifStmtMetaModel, "elseBlock", boolean.class, Optional.empty(), false, true, false, false, false); ifStmtMetaModel.getDerivedPropertyMetaModels().add(ifStmtMetaModel.elseBlockPropertyMetaModel); labeledStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(labeledStmtMetaModel, "label", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false); labeledStmtMetaModel.getDeclaredPropertyMetaModels().add(labeledStmtMetaModel.labelPropertyMetaModel); labeledStmtMetaModel.statementPropertyMetaModel = new PropertyMetaModel(labeledStmtMetaModel, "statement", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); labeledStmtMetaModel.getDeclaredPropertyMetaModels().add(labeledStmtMetaModel.statementPropertyMetaModel); returnStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(returnStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false); returnStmtMetaModel.getDeclaredPropertyMetaModels().add(returnStmtMetaModel.expressionPropertyMetaModel); switchEntryStmtMetaModel.labelPropertyMetaModel = new PropertyMetaModel(switchEntryStmtMetaModel, "label", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), true, false, false, false, false); switchEntryStmtMetaModel.getDeclaredPropertyMetaModels().add(switchEntryStmtMetaModel.labelPropertyMetaModel); switchEntryStmtMetaModel.statementsPropertyMetaModel = new PropertyMetaModel(switchEntryStmtMetaModel, "statements", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, true, false, false); switchEntryStmtMetaModel.getDeclaredPropertyMetaModels().add(switchEntryStmtMetaModel.statementsPropertyMetaModel); switchStmtMetaModel.entriesPropertyMetaModel = new PropertyMetaModel(switchStmtMetaModel, "entries", com.github.javaparser.ast.stmt.SwitchEntryStmt.class, Optional.of(switchEntryStmtMetaModel), false, false, true, false, false); switchStmtMetaModel.getDeclaredPropertyMetaModels().add(switchStmtMetaModel.entriesPropertyMetaModel); switchStmtMetaModel.selectorPropertyMetaModel = new PropertyMetaModel(switchStmtMetaModel, "selector", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); switchStmtMetaModel.getDeclaredPropertyMetaModels().add(switchStmtMetaModel.selectorPropertyMetaModel); synchronizedStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(synchronizedStmtMetaModel, "body", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), false, false, false, false, false); synchronizedStmtMetaModel.getDeclaredPropertyMetaModels().add(synchronizedStmtMetaModel.bodyPropertyMetaModel); synchronizedStmtMetaModel.expressionPropertyMetaModel = new 
PropertyMetaModel(synchronizedStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); synchronizedStmtMetaModel.getDeclaredPropertyMetaModels().add(synchronizedStmtMetaModel.expressionPropertyMetaModel); throwStmtMetaModel.expressionPropertyMetaModel = new PropertyMetaModel(throwStmtMetaModel, "expression", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); throwStmtMetaModel.getDeclaredPropertyMetaModels().add(throwStmtMetaModel.expressionPropertyMetaModel); tryStmtMetaModel.catchClausesPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "catchClauses", com.github.javaparser.ast.stmt.CatchClause.class, Optional.of(catchClauseMetaModel), false, false, true, false, false); tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.catchClausesPropertyMetaModel); tryStmtMetaModel.finallyBlockPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "finallyBlock", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false); tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.finallyBlockPropertyMetaModel); tryStmtMetaModel.resourcesPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "resources", com.github.javaparser.ast.expr.VariableDeclarationExpr.class, Optional.of(variableDeclarationExprMetaModel), false, false, true, false, false); tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.resourcesPropertyMetaModel); tryStmtMetaModel.tryBlockPropertyMetaModel = new PropertyMetaModel(tryStmtMetaModel, "tryBlock", com.github.javaparser.ast.stmt.BlockStmt.class, Optional.of(blockStmtMetaModel), true, false, false, false, false); tryStmtMetaModel.getDeclaredPropertyMetaModels().add(tryStmtMetaModel.tryBlockPropertyMetaModel); localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel = new PropertyMetaModel(localClassDeclarationStmtMetaModel, "classDeclaration", com.github.javaparser.ast.body.ClassOrInterfaceDeclaration.class, Optional.of(classOrInterfaceDeclarationMetaModel), false, false, false, false, false); localClassDeclarationStmtMetaModel.getDeclaredPropertyMetaModels().add(localClassDeclarationStmtMetaModel.classDeclarationPropertyMetaModel); whileStmtMetaModel.bodyPropertyMetaModel = new PropertyMetaModel(whileStmtMetaModel, "body", com.github.javaparser.ast.stmt.Statement.class, Optional.of(statementMetaModel), false, false, false, false, false); whileStmtMetaModel.getDeclaredPropertyMetaModels().add(whileStmtMetaModel.bodyPropertyMetaModel); whileStmtMetaModel.conditionPropertyMetaModel = new PropertyMetaModel(whileStmtMetaModel, "condition", com.github.javaparser.ast.expr.Expression.class, Optional.of(expressionMetaModel), false, false, false, false, false); whileStmtMetaModel.getDeclaredPropertyMetaModels().add(whileStmtMetaModel.conditionPropertyMetaModel); arrayTypeMetaModel.componentTypePropertyMetaModel = new PropertyMetaModel(arrayTypeMetaModel, "componentType", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), false, false, false, false, false); arrayTypeMetaModel.getDeclaredPropertyMetaModels().add(arrayTypeMetaModel.componentTypePropertyMetaModel); classOrInterfaceTypeMetaModel.namePropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, 
false); classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.namePropertyMetaModel); classOrInterfaceTypeMetaModel.scopePropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "scope", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), true, false, false, false, false); classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.scopePropertyMetaModel); classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "typeArguments", com.github.javaparser.ast.type.Type.class, Optional.of(typeMetaModel), true, false, true, false, false); classOrInterfaceTypeMetaModel.getDeclaredPropertyMetaModels().add(classOrInterfaceTypeMetaModel.typeArgumentsPropertyMetaModel); classOrInterfaceTypeMetaModel.usingDiamondOperatorPropertyMetaModel = new PropertyMetaModel(classOrInterfaceTypeMetaModel, "usingDiamondOperator", boolean.class, Optional.empty(), false, true, false, false, false); classOrInterfaceTypeMetaModel.getDerivedPropertyMetaModels().add(classOrInterfaceTypeMetaModel.usingDiamondOperatorPropertyMetaModel); intersectionTypeMetaModel.elementsPropertyMetaModel = new PropertyMetaModel(intersectionTypeMetaModel, "elements", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, true, true, false, false); intersectionTypeMetaModel.getDeclaredPropertyMetaModels().add(intersectionTypeMetaModel.elementsPropertyMetaModel); primitiveTypeMetaModel.typePropertyMetaModel = new PropertyMetaModel(primitiveTypeMetaModel, "type", com.github.javaparser.ast.type.PrimitiveType.Primitive.class, Optional.empty(), false, false, false, false, false); primitiveTypeMetaModel.getDeclaredPropertyMetaModels().add(primitiveTypeMetaModel.typePropertyMetaModel); typeParameterMetaModel.namePropertyMetaModel = new PropertyMetaModel(typeParameterMetaModel, "name", com.github.javaparser.ast.expr.SimpleName.class, Optional.of(simpleNameMetaModel), false, false, false, false, false); typeParameterMetaModel.getDeclaredPropertyMetaModels().add(typeParameterMetaModel.namePropertyMetaModel); typeParameterMetaModel.typeBoundPropertyMetaModel = new PropertyMetaModel(typeParameterMetaModel, "typeBound", com.github.javaparser.ast.type.ClassOrInterfaceType.class, Optional.of(classOrInterfaceTypeMetaModel), false, false, true, false, false); typeParameterMetaModel.getDeclaredPropertyMetaModels().add(typeParameterMetaModel.typeBoundPropertyMetaModel); unionTypeMetaModel.elementsPropertyMetaModel = new PropertyMetaModel(unionTypeMetaModel, "elements", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), false, true, true, false, false); unionTypeMetaModel.getDeclaredPropertyMetaModels().add(unionTypeMetaModel.elementsPropertyMetaModel); wildcardTypeMetaModel.extendedTypePropertyMetaModel = new PropertyMetaModel(wildcardTypeMetaModel, "extendedType", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), true, false, false, false, false); wildcardTypeMetaModel.getDeclaredPropertyMetaModels().add(wildcardTypeMetaModel.extendedTypePropertyMetaModel); wildcardTypeMetaModel.superTypePropertyMetaModel = new PropertyMetaModel(wildcardTypeMetaModel, "superType", com.github.javaparser.ast.type.ReferenceType.class, Optional.of(referenceTypeMetaModel), true, false, false, false, false); 
wildcardTypeMetaModel.getDeclaredPropertyMetaModels().add(wildcardTypeMetaModel.superTypePropertyMetaModel); } public static Optional<BaseNodeMetaModel> getNodeMetaModel(Class<? extends Node> c) { for (BaseNodeMetaModel nodeMetaModel : nodeMetaModels) { if (nodeMetaModel.getTypeNameGenerified().equals(c.getSimpleName())) { return Optional.of(nodeMetaModel); } } return Optional.empty(); } public static final NodeMetaModel nodeMetaModel = new NodeMetaModel(Optional.empty()); public static final BodyDeclarationMetaModel bodyDeclarationMetaModel = new BodyDeclarationMetaModel(Optional.of(nodeMetaModel)); public static final CallableDeclarationMetaModel callableDeclarationMetaModel = new CallableDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final StatementMetaModel statementMetaModel = new StatementMetaModel(Optional.of(nodeMetaModel)); public static final ExpressionMetaModel expressionMetaModel = new ExpressionMetaModel(Optional.of(nodeMetaModel)); public static final TypeMetaModel typeMetaModel = new TypeMetaModel(Optional.of(nodeMetaModel)); public static final AnnotationExprMetaModel annotationExprMetaModel = new AnnotationExprMetaModel(Optional.of(expressionMetaModel)); public static final TypeDeclarationMetaModel typeDeclarationMetaModel = new TypeDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final ReferenceTypeMetaModel referenceTypeMetaModel = new ReferenceTypeMetaModel(Optional.of(typeMetaModel)); public static final LiteralExprMetaModel literalExprMetaModel = new LiteralExprMetaModel(Optional.of(expressionMetaModel)); public static final LiteralStringValueExprMetaModel literalStringValueExprMetaModel = new LiteralStringValueExprMetaModel(Optional.of(literalExprMetaModel)); public static final StringLiteralExprMetaModel stringLiteralExprMetaModel = new StringLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel)); public static final ArrayCreationLevelMetaModel arrayCreationLevelMetaModel = new ArrayCreationLevelMetaModel(Optional.of(nodeMetaModel)); public static final CompilationUnitMetaModel compilationUnitMetaModel = new CompilationUnitMetaModel(Optional.of(nodeMetaModel)); public static final PackageDeclarationMetaModel packageDeclarationMetaModel = new PackageDeclarationMetaModel(Optional.of(nodeMetaModel)); public static final AnnotationDeclarationMetaModel annotationDeclarationMetaModel = new AnnotationDeclarationMetaModel(Optional.of(typeDeclarationMetaModel)); public static final AnnotationMemberDeclarationMetaModel annotationMemberDeclarationMetaModel = new AnnotationMemberDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final ClassOrInterfaceDeclarationMetaModel classOrInterfaceDeclarationMetaModel = new ClassOrInterfaceDeclarationMetaModel(Optional.of(typeDeclarationMetaModel)); public static final ConstructorDeclarationMetaModel constructorDeclarationMetaModel = new ConstructorDeclarationMetaModel(Optional.of(callableDeclarationMetaModel)); public static final EmptyMemberDeclarationMetaModel emptyMemberDeclarationMetaModel = new EmptyMemberDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final EnumConstantDeclarationMetaModel enumConstantDeclarationMetaModel = new EnumConstantDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final EnumDeclarationMetaModel enumDeclarationMetaModel = new EnumDeclarationMetaModel(Optional.of(typeDeclarationMetaModel)); public static final FieldDeclarationMetaModel fieldDeclarationMetaModel = new 
FieldDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final InitializerDeclarationMetaModel initializerDeclarationMetaModel = new InitializerDeclarationMetaModel(Optional.of(bodyDeclarationMetaModel)); public static final MethodDeclarationMetaModel methodDeclarationMetaModel = new MethodDeclarationMetaModel(Optional.of(callableDeclarationMetaModel)); public static final ParameterMetaModel parameterMetaModel = new ParameterMetaModel(Optional.of(nodeMetaModel)); public static final VariableDeclaratorMetaModel variableDeclaratorMetaModel = new VariableDeclaratorMetaModel(Optional.of(nodeMetaModel)); public static final CommentMetaModel commentMetaModel = new CommentMetaModel(Optional.of(nodeMetaModel)); public static final BlockCommentMetaModel blockCommentMetaModel = new BlockCommentMetaModel(Optional.of(commentMetaModel)); public static final JavadocCommentMetaModel javadocCommentMetaModel = new JavadocCommentMetaModel(Optional.of(commentMetaModel)); public static final LineCommentMetaModel lineCommentMetaModel = new LineCommentMetaModel(Optional.of(commentMetaModel)); public static final ArrayAccessExprMetaModel arrayAccessExprMetaModel = new ArrayAccessExprMetaModel(Optional.of(expressionMetaModel)); public static final ArrayCreationExprMetaModel arrayCreationExprMetaModel = new ArrayCreationExprMetaModel(Optional.of(expressionMetaModel)); public static final ArrayInitializerExprMetaModel arrayInitializerExprMetaModel = new ArrayInitializerExprMetaModel(Optional.of(expressionMetaModel)); public static final AssignExprMetaModel assignExprMetaModel = new AssignExprMetaModel(Optional.of(expressionMetaModel)); public static final BinaryExprMetaModel binaryExprMetaModel = new BinaryExprMetaModel(Optional.of(expressionMetaModel)); public static final BooleanLiteralExprMetaModel booleanLiteralExprMetaModel = new BooleanLiteralExprMetaModel(Optional.of(literalExprMetaModel)); public static final CastExprMetaModel castExprMetaModel = new CastExprMetaModel(Optional.of(expressionMetaModel)); public static final CharLiteralExprMetaModel charLiteralExprMetaModel = new CharLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel)); public static final ClassExprMetaModel classExprMetaModel = new ClassExprMetaModel(Optional.of(expressionMetaModel)); public static final ConditionalExprMetaModel conditionalExprMetaModel = new ConditionalExprMetaModel(Optional.of(expressionMetaModel)); public static final DoubleLiteralExprMetaModel doubleLiteralExprMetaModel = new DoubleLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel)); public static final EnclosedExprMetaModel enclosedExprMetaModel = new EnclosedExprMetaModel(Optional.of(expressionMetaModel)); public static final FieldAccessExprMetaModel fieldAccessExprMetaModel = new FieldAccessExprMetaModel(Optional.of(expressionMetaModel)); public static final InstanceOfExprMetaModel instanceOfExprMetaModel = new InstanceOfExprMetaModel(Optional.of(expressionMetaModel)); public static final IntegerLiteralExprMetaModel integerLiteralExprMetaModel = new IntegerLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel)); public static final LambdaExprMetaModel lambdaExprMetaModel = new LambdaExprMetaModel(Optional.of(expressionMetaModel)); public static final LongLiteralExprMetaModel longLiteralExprMetaModel = new LongLiteralExprMetaModel(Optional.of(literalStringValueExprMetaModel)); public static final MarkerAnnotationExprMetaModel markerAnnotationExprMetaModel = new 
MarkerAnnotationExprMetaModel(Optional.of(annotationExprMetaModel)); public static final MemberValuePairMetaModel memberValuePairMetaModel = new MemberValuePairMetaModel(Optional.of(nodeMetaModel)); public static final MethodCallExprMetaModel methodCallExprMetaModel = new MethodCallExprMetaModel(Optional.of(expressionMetaModel)); public static final MethodReferenceExprMetaModel methodReferenceExprMetaModel = new MethodReferenceExprMetaModel(Optional.of(expressionMetaModel)); public static final NameExprMetaModel nameExprMetaModel = new NameExprMetaModel(Optional.of(expressionMetaModel)); public static final NameMetaModel nameMetaModel = new NameMetaModel(Optional.of(nodeMetaModel)); public static final NormalAnnotationExprMetaModel normalAnnotationExprMetaModel = new NormalAnnotationExprMetaModel(Optional.of(annotationExprMetaModel)); public static final NullLiteralExprMetaModel nullLiteralExprMetaModel = new NullLiteralExprMetaModel(Optional.of(literalExprMetaModel)); public static final ObjectCreationExprMetaModel objectCreationExprMetaModel = new ObjectCreationExprMetaModel(Optional.of(expressionMetaModel)); public static final SimpleNameMetaModel simpleNameMetaModel = new SimpleNameMetaModel(Optional.of(nodeMetaModel)); public static final SingleMemberAnnotationExprMetaModel singleMemberAnnotationExprMetaModel = new SingleMemberAnnotationExprMetaModel(Optional.of(annotationExprMetaModel)); public static final SuperExprMetaModel superExprMetaModel = new SuperExprMetaModel(Optional.of(expressionMetaModel)); public static final ThisExprMetaModel thisExprMetaModel = new ThisExprMetaModel(Optional.of(expressionMetaModel)); public static final TypeExprMetaModel typeExprMetaModel = new TypeExprMetaModel(Optional.of(expressionMetaModel)); public static final UnaryExprMetaModel unaryExprMetaModel = new UnaryExprMetaModel(Optional.of(expressionMetaModel)); public static final VariableDeclarationExprMetaModel variableDeclarationExprMetaModel = new VariableDeclarationExprMetaModel(Optional.of(expressionMetaModel)); public static final ImportDeclarationMetaModel importDeclarationMetaModel = new ImportDeclarationMetaModel(Optional.of(nodeMetaModel)); public static final AssertStmtMetaModel assertStmtMetaModel = new AssertStmtMetaModel(Optional.of(statementMetaModel)); public static final BlockStmtMetaModel blockStmtMetaModel = new BlockStmtMetaModel(Optional.of(statementMetaModel)); public static final BreakStmtMetaModel breakStmtMetaModel = new BreakStmtMetaModel(Optional.of(statementMetaModel)); public static final CatchClauseMetaModel catchClauseMetaModel = new CatchClauseMetaModel(Optional.of(nodeMetaModel)); public static final ContinueStmtMetaModel continueStmtMetaModel = new ContinueStmtMetaModel(Optional.of(statementMetaModel)); public static final DoStmtMetaModel doStmtMetaModel = new DoStmtMetaModel(Optional.of(statementMetaModel)); public static final EmptyStmtMetaModel emptyStmtMetaModel = new EmptyStmtMetaModel(Optional.of(statementMetaModel)); public static final ExplicitConstructorInvocationStmtMetaModel explicitConstructorInvocationStmtMetaModel = new ExplicitConstructorInvocationStmtMetaModel(Optional.of(statementMetaModel)); public static final ExpressionStmtMetaModel expressionStmtMetaModel = new ExpressionStmtMetaModel(Optional.of(statementMetaModel)); public static final ForeachStmtMetaModel foreachStmtMetaModel = new ForeachStmtMetaModel(Optional.of(statementMetaModel)); public static final ForStmtMetaModel forStmtMetaModel = new 
ForStmtMetaModel(Optional.of(statementMetaModel)); public static final IfStmtMetaModel ifStmtMetaModel = new IfStmtMetaModel(Optional.of(statementMetaModel)); public static final LabeledStmtMetaModel labeledStmtMetaModel = new LabeledStmtMetaModel(Optional.of(statementMetaModel)); public static final ReturnStmtMetaModel returnStmtMetaModel = new ReturnStmtMetaModel(Optional.of(statementMetaModel)); public static final SwitchEntryStmtMetaModel switchEntryStmtMetaModel = new SwitchEntryStmtMetaModel(Optional.of(statementMetaModel)); public static final SwitchStmtMetaModel switchStmtMetaModel = new SwitchStmtMetaModel(Optional.of(statementMetaModel)); public static final SynchronizedStmtMetaModel synchronizedStmtMetaModel = new SynchronizedStmtMetaModel(Optional.of(statementMetaModel)); public static final ThrowStmtMetaModel throwStmtMetaModel = new ThrowStmtMetaModel(Optional.of(statementMetaModel)); public static final TryStmtMetaModel tryStmtMetaModel = new TryStmtMetaModel(Optional.of(statementMetaModel)); public static final LocalClassDeclarationStmtMetaModel localClassDeclarationStmtMetaModel = new LocalClassDeclarationStmtMetaModel(Optional.of(statementMetaModel)); public static final WhileStmtMetaModel whileStmtMetaModel = new WhileStmtMetaModel(Optional.of(statementMetaModel)); public static final ArrayTypeMetaModel arrayTypeMetaModel = new ArrayTypeMetaModel(Optional.of(referenceTypeMetaModel)); public static final ClassOrInterfaceTypeMetaModel classOrInterfaceTypeMetaModel = new ClassOrInterfaceTypeMetaModel(Optional.of(referenceTypeMetaModel)); public static final IntersectionTypeMetaModel intersectionTypeMetaModel = new IntersectionTypeMetaModel(Optional.of(typeMetaModel)); public static final PrimitiveTypeMetaModel primitiveTypeMetaModel = new PrimitiveTypeMetaModel(Optional.of(typeMetaModel)); public static final TypeParameterMetaModel typeParameterMetaModel = new TypeParameterMetaModel(Optional.of(referenceTypeMetaModel)); public static final UnionTypeMetaModel unionTypeMetaModel = new UnionTypeMetaModel(Optional.of(typeMetaModel)); public static final UnknownTypeMetaModel unknownTypeMetaModel = new UnknownTypeMetaModel(Optional.of(typeMetaModel)); public static final VoidTypeMetaModel voidTypeMetaModel = new VoidTypeMetaModel(Optional.of(typeMetaModel)); public static final WildcardTypeMetaModel wildcardTypeMetaModel = new WildcardTypeMetaModel(Optional.of(typeMetaModel)); static { initializeNodeMetaModels(); initializePropertyMetaModels(); initializeConstructorParameters(); } }
1
10,635
Maybe I should do some sorting so that these properties don't keep moving around...
javaparser-javaparser
java
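The reviewer's note above is about deterministic code generation: if the generator emits these PropertyMetaModel assignments in whatever order reflection happens to return them, every regeneration reshuffles the file and pollutes diffs. A minimal Go sketch of the sorting idea (the real generator is Java; the property type and names below are hypothetical stand-ins, not JavaParser code):

package main

import (
	"fmt"
	"sort"
)

// property is a hypothetical stand-in for one generated metamodel entry.
type property struct {
	owner string // declaring metamodel, e.g. "unaryExprMetaModel"
	name  string // property name, e.g. "operator"
}

func main() {
	props := []property{
		{"unaryExprMetaModel", "prefix"},
		{"unaryExprMetaModel", "expression"},
		{"unaryExprMetaModel", "operator"},
	}
	// Sort by (owner, name) before emitting, so regenerated files keep a
	// stable order and the properties stop "moving around" between runs.
	sort.Slice(props, func(i, j int) bool {
		if props[i].owner != props[j].owner {
			return props[i].owner < props[j].owner
		}
		return props[i].name < props[j].name
	})
	for _, p := range props {
		fmt.Printf("%s.%sPropertyMetaModel = ...\n", p.owner, p.name)
	}
}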
@@ -32,14 +32,6 @@ func TestCheckMountDestFalsePositive(t *testing.T) { } } -func TestCheckMountRoot(t *testing.T) { - dest := "/rootfs" - err := checkMountDestination("/rootfs", dest) - if err == nil { - t.Fatal(err) - } -} - func TestNeedsSetupDev(t *testing.T) { config := &configs.Config{ Mounts: []*configs.Mount{
1
// +build linux package libcontainer import ( "testing" "github.com/opencontainers/runc/libcontainer/configs" ) func TestCheckMountDestOnProc(t *testing.T) { dest := "/rootfs/proc/" err := checkMountDestination("/rootfs", dest) if err == nil { t.Fatal("destination inside proc should return an error") } } func TestCheckMountDestInSys(t *testing.T) { dest := "/rootfs//sys/fs/cgroup" err := checkMountDestination("/rootfs", dest) if err != nil { t.Fatal("destination inside /sys should not return an error") } } func TestCheckMountDestFalsePositive(t *testing.T) { dest := "/rootfs/sysfiles/fs/cgroup" err := checkMountDestination("/rootfs", dest) if err != nil { t.Fatal(err) } } func TestCheckMountRoot(t *testing.T) { dest := "/rootfs" err := checkMountDestination("/rootfs", dest) if err == nil { t.Fatal(err) } } func TestNeedsSetupDev(t *testing.T) { config := &configs.Config{ Mounts: []*configs.Mount{ { Device: "bind", Source: "/dev", Destination: "/dev", }, }, } if needsSetupDev(config) { t.Fatal("expected needsSetupDev to be false, got true") } } func TestNeedsSetupDevStrangeSource(t *testing.T) { config := &configs.Config{ Mounts: []*configs.Mount{ { Device: "bind", Source: "/devx", Destination: "/dev", }, }, } if needsSetupDev(config) { t.Fatal("expected needsSetupDev to be false, got true") } } func TestNeedsSetupDevStrangeDest(t *testing.T) { config := &configs.Config{ Mounts: []*configs.Mount{ { Device: "bind", Source: "/dev", Destination: "/devx", }, }, } if !needsSetupDev(config) { t.Fatal("expected needsSetupDev to be true, got false") } } func TestNeedsSetupDevStrangeSourceDest(t *testing.T) { config := &configs.Config{ Mounts: []*configs.Mount{ { Device: "bind", Source: "/devx", Destination: "/devx", }, }, } if !needsSetupDev(config) { t.Fatal("expected needsSetupDev to be true, got false") } }
1
12,922
Can we keep this test anyway, as it's part of the spec?
opencontainers-runc
go
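The reviewer asks to keep TestCheckMountRoot because rejecting a mount directly over the rootfs root is part of the spec. A sketch of how the test could be reinstated in the same package — it assumes the package's existing checkMountDestination helper, and swaps the original t.Fatal(err) on a nil error for an actual failure message:

// +build linux

package libcontainer

import "testing"

// TestCheckMountRoot verifies the spec behaviour the reviewer wants covered:
// mounting over the rootfs root itself must be rejected.
func TestCheckMountRoot(t *testing.T) {
	dest := "/rootfs"
	if err := checkMountDestination("/rootfs", dest); err == nil {
		t.Fatal("mounting over the rootfs root should return an error")
	}
}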
@@ -0,0 +1,19 @@ +<?php + +/** + * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. + * See license.txt for license details. + */ + +declare(strict_types = 1); + +namespace Ergonode\Mailer; + +use Ergonode\Mailer\Application\DependencyInjection\CompilerPass\MailerStrategyInterfaceCompilerPass; +use Ergonode\SharedKernel\Application\AbstractModule; + +/** + */ +class ErgonodeMailerBundle extends AbstractModule +{ +}
1
1
8,865
Can be removed
ergonode-backend
php
@@ -1,5 +1,7 @@ <?php namespace System\Controllers; +use Config; +use Request; use Lang; use Flash; use Backend;
1
<?php namespace System\Controllers; use Lang; use Flash; use Backend; use BackendMenu; use System\Classes\SettingsManager; use Backend\Classes\Controller; use ApplicationException; use Exception; /** * Settings controller * * @package october\system * @author Alexey Bobkov, Samuel Georges * */ class Settings extends Controller { /** * @var WidgetBase Reference to the widget object. */ protected $formWidget; /** * @var array Permissions required to view this page. */ public $requiredPermissions = []; /** * Constructor. */ public function __construct() { parent::__construct(); if ($this->action == 'backend_preferences') { $this->requiredPermissions = ['backend.manage_preferences']; } $this->addCss('/modules/system/assets/css/settings/settings.css', 'core'); BackendMenu::setContext('October.System', 'system', 'settings'); } public function index() { $this->pageTitle = 'system::lang.settings.menu_label'; $this->vars['items'] = SettingsManager::instance()->listItems('system'); $this->bodyClass = 'compact-container sidenav-tree-root'; } public function mysettings() { BackendMenu::setContextSideMenu('mysettings'); $this->pageTitle = 'backend::lang.mysettings.menu_label'; $this->vars['items'] = SettingsManager::instance()->listItems('mysettings'); $this->bodyClass = 'compact-container'; } // // Generated Form // public function update($author, $plugin, $code = null) { SettingsManager::setContext($author.'.'.$plugin, $code); $this->vars['parentLink'] = Backend::url('system/settings'); $this->vars['parentLabel'] = Lang::get('system::lang.settings.menu_label'); try { if (!$item = $this->findSettingItem($author, $plugin, $code)) { throw new ApplicationException(Lang::get('system::lang.settings.not_found')); } $this->pageTitle = $item->label; if ($item->context == 'mysettings') { $this->vars['parentLink'] = Backend::url('system/settings/mysettings'); $this->vars['parentLabel'] = Lang::get('backend::lang.mysettings.menu_label'); } $model = $this->createModel($item); $this->initWidgets($model); } catch (Exception $ex) { $this->handleError($ex); } } public function update_onSave($author, $plugin, $code = null) { $item = $this->findSettingItem($author, $plugin, $code); $model = $this->createModel($item); $this->initWidgets($model); $saveData = $this->formWidget->getSaveData(); foreach ($saveData as $attribute => $value) { $model->{$attribute} = $value; } $model->save(null, $this->formWidget->getSessionKey()); Flash::success(Lang::get('system::lang.settings.update_success', ['name' => Lang::get($item->label)])); /* * Handle redirect */ if ($redirectUrl = post('redirect', true)) { $redirectUrl = ($item->context == 'mysettings') ? 'system/settings/mysettings' : 'system/settings'; return Backend::redirect($redirectUrl); } } public function update_onResetDefault($author, $plugin, $code = null) { $item = $this->findSettingItem($author, $plugin, $code); $model = $this->createModel($item); $model->resetDefault(); Flash::success(Lang::get('backend::lang.form.reset_success')); return Backend::redirect('system/settings/update/'.$author.'/'.$plugin.'/'.$code); } /** * Render the form. 
*/ public function formRender($options = []) { if (!$this->formWidget) { throw new ApplicationException(Lang::get('backend::lang.form.behavior_not_ready')); } return $this->formWidget->render($options); } /** * Prepare the widgets used by this action * Model $model */ protected function initWidgets($model) { $config = $model->getFieldConfig(); $config->model = $model; $config->arrayName = class_basename($model); $config->context = 'update'; $widget = $this->makeWidget('Backend\Widgets\Form', $config); $widget->bindToController(); $this->formWidget = $widget; } /** * Internal method, prepare the list model object */ protected function createModel($item) { if (!isset($item->class) || !strlen($item->class)) { throw new ApplicationException(Lang::get('system::lang.settings.missing_model')); } $class = $item->class; return $class::instance(); } /** * Locates a setting item for a module or plugin */ protected function findSettingItem($author, $plugin, $code) { $manager = SettingsManager::instance(); $moduleOwner = $author; $moduleCode = $plugin; $item = $manager->findSettingItem($moduleOwner, $moduleCode); if (!$item) { $pluginOwner = $author . '.' . $plugin; $pluginCode = $code; $item = $manager->findSettingItem($pluginOwner, $pluginCode); } return $item; } }
1
18,863
It's a minor quibble, I know, but I like having the imports ordered by length
octobercms-october
php
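The convention in this review — use statements ordered by line length — is easy to mechanize. A throwaway Go sketch that sorts the import lines from this patch by length (purely illustrative; the project itself is PHP):

package main

import (
	"fmt"
	"sort"
)

func main() {
	uses := []string{
		"use Config;",
		"use Request;",
		"use Lang;",
		"use Flash;",
		"use Backend;",
		"use BackendMenu;",
	}
	// Shortest lines first, keeping the original order for equal lengths.
	sort.SliceStable(uses, func(i, j int) bool {
		return len(uses[i]) < len(uses[j])
	})
	for _, u := range uses {
		fmt.Println(u)
	}
}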
@@ -216,7 +216,7 @@ public abstract class AbstractRestInvocation { } } responseEx.setStatus(response.getStatusCode(), response.getReasonPhrase()); - responseEx.setContentType(produceProcessor.getName()); + responseEx.setContentType(produceProcessor.getName()+"; charset=utf-8"); Object body = response.getResult(); if (response.isFailed()) {
1
/* * Copyright 2017 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.servicecomb.common.rest; import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Map.Entry; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.Response.Status; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.netty.buffer.Unpooled; import io.servicecomb.common.rest.codec.RestCodec; import io.servicecomb.common.rest.codec.produce.ProduceProcessor; import io.servicecomb.common.rest.codec.produce.ProduceProcessorManager; import io.servicecomb.common.rest.definition.RestOperationMeta; import io.servicecomb.common.rest.filter.HttpServerFilter; import io.servicecomb.common.rest.locator.OperationLocator; import io.servicecomb.common.rest.locator.ServicePathManager; import io.servicecomb.core.Const; import io.servicecomb.core.Invocation; import io.servicecomb.core.definition.MicroserviceMeta; import io.servicecomb.core.definition.OperationMeta; import io.servicecomb.core.metrics.InvocationStartedEvent; import io.servicecomb.foundation.common.utils.EventUtils; import io.servicecomb.foundation.common.utils.JsonUtils; import io.servicecomb.foundation.vertx.http.HttpServletRequestEx; import io.servicecomb.foundation.vertx.http.HttpServletResponseEx; import io.servicecomb.foundation.vertx.stream.BufferOutputStream; import io.servicecomb.swagger.invocation.Response; import io.servicecomb.swagger.invocation.exception.InvocationException; public abstract class AbstractRestInvocation { private static final Logger LOGGER = LoggerFactory.getLogger(AbstractRestInvocation.class); protected RestOperationMeta restOperationMeta; protected Invocation invocation; protected HttpServletRequestEx requestEx; protected HttpServletResponseEx responseEx; protected ProduceProcessor produceProcessor; protected List<HttpServerFilter> httpServerFilters = Collections.emptyList(); public void setHttpServerFilters(List<HttpServerFilter> httpServerFilters) { this.httpServerFilters = httpServerFilters; } protected void findRestOperation(MicroserviceMeta microserviceMeta) { ServicePathManager servicePathManager = ServicePathManager.getServicePathManager(microserviceMeta); if (servicePathManager == null) { LOGGER.error("No schema defined for {}:{}.", microserviceMeta.getAppId(), microserviceMeta.getName()); throw new InvocationException(Status.NOT_FOUND, Status.NOT_FOUND.getReasonPhrase()); } OperationLocator locator = locateOperation(servicePathManager); requestEx.setAttribute(RestConst.PATH_PARAMETERS, locator.getPathVarMap()); this.restOperationMeta = locator.getOperation(); } protected void initProduceProcessor() { produceProcessor = restOperationMeta.ensureFindProduceProcessor(requestEx); if (produceProcessor == null) { String msg = String.format("Accept %s is not supported", requestEx.getHeader(HttpHeaders.ACCEPT)); throw new InvocationException(Status.NOT_ACCEPTABLE, msg); } } 
protected void setContext() throws Exception { String strCseContext = requestEx.getHeader(Const.CSE_CONTEXT); if (StringUtils.isEmpty(strCseContext)) { return; } @SuppressWarnings("unchecked") Map<String, String> cseContext = JsonUtils.readValue(strCseContext.getBytes(StandardCharsets.UTF_8), Map.class); invocation.setContext(cseContext); } protected void scheduleInvocation() { OperationMeta operationMeta = restOperationMeta.getOperationMeta(); InvocationStartedEvent startedEvent = new InvocationStartedEvent(operationMeta.getMicroserviceQualifiedName(), System.nanoTime()); EventUtils.triggerEvent(startedEvent); operationMeta.getExecutor().execute(() -> { synchronized (this.requestEx) { try { if (requestEx.getAttribute(RestConst.REST_REQUEST) != requestEx) { // already timeout // in this time, request maybe recycled and reused by web container, do not use requestEx LOGGER.error("Rest request already timeout, abandon execute, method {}, operation {}.", operationMeta.getHttpMethod(), operationMeta.getMicroserviceQualifiedName()); return; } runOnExecutor(startedEvent); } catch (Throwable e) { LOGGER.error("rest server onRequest error", e); sendFailResponse(e); } } }); } protected void runOnExecutor(InvocationStartedEvent startedEvent) { Object[] args = RestCodec.restToArgs(requestEx, restOperationMeta); createInvocation(args); // Set the start time immediately; otherwise TotalTime cannot be calculated when the invocation finishes invocation.setStartTime(startedEvent.getStartedTime()); invocation.triggerStartProcessingEvent(); invoke(); } protected abstract OperationLocator locateOperation(ServicePathManager servicePathManager); protected abstract void createInvocation(Object[] args); public void invoke() { try { Response response = prepareInvoke(); if (response != null) { sendResponseQuietly(response); return; } doInvoke(); } catch (Throwable e) { LOGGER.error("unknown rest exception.", e); sendFailResponse(e); } } protected Response prepareInvoke() throws Throwable { this.initProduceProcessor(); this.setContext(); invocation.getHandlerContext().put(RestConst.REST_REQUEST, requestEx); for (HttpServerFilter filter : httpServerFilters) { Response response = filter.afterReceiveRequest(invocation, requestEx); if (response != null) { return response; } } return null; } protected void doInvoke() throws Throwable { invocation.next(resp -> { sendResponseQuietly(resp); invocation.triggerFinishedEvent(); }); } public void sendFailResponse(Throwable throwable) { if (produceProcessor == null) { produceProcessor = ProduceProcessorManager.DEFAULT_PROCESSOR; } Response response = Response.createProducerFail(throwable); sendResponseQuietly(response); } protected void sendResponseQuietly(Response response) { try { sendResponse(response); } catch (Throwable e) { LOGGER.error("Failed to send rest response, operation:{}.", invocation.getMicroserviceQualifiedName(), e); } finally { requestEx.getAsyncContext().complete(); } } @SuppressWarnings("deprecation") protected void sendResponse(Response response) throws Exception { if (response.getHeaders().getHeaderMap() != null) { for (Entry<String, List<Object>> entry : response.getHeaders().getHeaderMap().entrySet()) { for (Object value : entry.getValue()) { if (!entry.getKey().equalsIgnoreCase(HttpHeaders.CONTENT_LENGTH)) { responseEx.addHeader(entry.getKey(), String.valueOf(value)); } } } } responseEx.setStatus(response.getStatusCode(), response.getReasonPhrase()); responseEx.setContentType(produceProcessor.getName()); Object body = response.getResult(); if (response.isFailed()) { body = ((InvocationException) body).getErrorData(); } 
try (BufferOutputStream output = new BufferOutputStream(Unpooled.compositeBuffer())) { produceProcessor.encodeResponse(output, body); responseEx.setBodyBuffer(output.getBuffer()); for (HttpServerFilter filter : httpServerFilters) { filter.beforeSendResponse(invocation, responseEx); } responseEx.flushBuffer(); } } }
1
7,961
It's better to get the charset from the context or another setting, so that the user can override it.
apache-servicecomb-java-chassis
java
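The review point is that hardcoding "; charset=utf-8" removes the user's choice: the charset should come from configuration (or the invocation context), with UTF-8 only as the fallback. A minimal Go sketch of that pattern (the config lookup is a hypothetical stand-in; the actual project is Java and would read its own settings):

package main

import "fmt"

// contentTypeWith builds a Content-Type value, preferring a user-configured
// charset and falling back to UTF-8 when nothing is configured.
func contentTypeWith(mediaType, configuredCharset string) string {
	charset := configuredCharset
	if charset == "" {
		charset = "utf-8" // default only when the user sets nothing
	}
	return fmt.Sprintf("%s; charset=%s", mediaType, charset)
}

func main() {
	fmt.Println(contentTypeWith("application/json", ""))    // application/json; charset=utf-8
	fmt.Println(contentTypeWith("application/json", "gbk")) // user override wins
}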
@@ -70,7 +70,7 @@ func NewCStorPoolController( cStorInformerFactory informers.SharedInformerFactory) *CStorPoolController { // obtain references to shared index informers for the cStorPool resources - cStorPoolInformer := cStorInformerFactory.Openebs().V1alpha1().NewTestCStorPools() + cStorPoolInformer := cStorInformerFactory.Openebs().V1alpha1().CStorPoolInstances() zpool.KubeClient = kubeclientset zpool.OpenEBSClient = clientset
1
/* Copyright 2018 The OpenEBS Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package poolcontroller import ( "github.com/golang/glog" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/runtime" kubeinformers "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/workqueue" "github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common" zpool "github.com/openebs/maya/cmd/cstor-pool-mgmt/pool/v1alpha2" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned" openebsScheme "github.com/openebs/maya/pkg/client/generated/clientset/versioned/scheme" informers "github.com/openebs/maya/pkg/client/generated/informers/externalversions" //openebsScheme "github.com/openebs/maya/pkg/client/generated/openebs.io/v1alpha2/clientset/internalclientset/scheme" ) const poolControllerName = "CStorPool" // CStorPoolController is the controller implementation for CStorPool resources. type CStorPoolController struct { // kubeclientset is a standard kubernetes clientset kubeclientset kubernetes.Interface // clientset is a openebs custom resource package generated for custom API group. clientset clientset.Interface // cStorPoolSynced is used for caches sync to get populated cStorPoolSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder } // NewCStorPoolController returns a new instance of CStorPool controller func NewCStorPoolController( kubeclientset kubernetes.Interface, clientset clientset.Interface, kubeInformerFactory kubeinformers.SharedInformerFactory, cStorInformerFactory informers.SharedInformerFactory) *CStorPoolController { // obtain references to shared index informers for the cStorPool resources cStorPoolInformer := cStorInformerFactory.Openebs().V1alpha1().NewTestCStorPools() zpool.KubeClient = kubeclientset zpool.OpenEBSClient = clientset err := openebsScheme.AddToScheme(scheme.Scheme) if err != nil { glog.Errorf("failed to add to scheme: error {%v}", err) return nil } // Create event broadcaster to receive events and send them to any EventSink, watcher, or log. // Add NewCstorPoolController types to the default Kubernetes Scheme so Events can be // logged for CstorPool Controller types. 
glog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(glog.Infof) // StartEventWatcher starts sending events received from this EventBroadcaster to the given // event handler function. The return value can be ignored or used to stop recording, if // desired. Events("") denotes empty namespace eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: poolControllerName}) controller := &CStorPoolController{ kubeclientset: kubeclientset, clientset: clientset, cStorPoolSynced: cStorPoolInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), poolControllerName), recorder: recorder, } glog.Info("Setting up event handlers for CSP") // Set up an event handler for when CstorPool resources change. cStorPoolInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { csp := obj.(*apis.NewTestCStorPool) if !IsRightCStorPoolMgmt(csp) { return } controller.enqueueCStorPool(csp) }, UpdateFunc: func(oldVar, newVar interface{}) { csp := newVar.(*apis.NewTestCStorPool) if !IsRightCStorPoolMgmt(csp) { return } controller.enqueueCStorPool(csp) }, }) return controller } // enqueueCstorPool takes a CStorPool resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than CStorPools. func (c *CStorPoolController) enqueueCStorPool(obj *apis.NewTestCStorPool) { key, err := cache.MetaNamespaceKeyFunc(obj) if err != nil { runtime.HandleError(err) return } c.workqueue.AddRateLimited(common.QueueLoad{Key: key}) }
1
17,144
This filename needs to be changed as well.
openebs-maya
go
@@ -179,10 +179,11 @@ type ACMEIssuerDNS01ProviderCloudflare struct { // ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53 // configuration for AWS type ACMEIssuerDNS01ProviderRoute53 struct { - AccessKeyID string `json:"accessKeyID"` - SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"` - HostedZoneID string `json:"hostedZoneID"` - Region string `json:"region"` + AccessKeyID string `json:"accessKeyID,omitempty"` + AccessKeyIDRef SecretKeySelector `json:"accessKeyIDSecretRef,omitempty"` + SecretAccessKeyRef SecretKeySelector `json:"secretAccessKeySecretRef"` + HostedZoneID string `json:"hostedZoneID"` + Region string `json:"region"` } // ACMEIssuerDNS01ProviderAzureDNS is a structure containing the
1
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( AltNamesAnnotationKey = "certmanager.k8s.io/alt-names" CommonNameAnnotationKey = "certmanager.k8s.io/common-name" IssuerNameAnnotationKey = "certmanager.k8s.io/issuer-name" IssuerKindAnnotationKey = "certmanager.k8s.io/issuer-kind" ) // +genclient // +genclient:nonNamespaced // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +resource:path=clusterissuers type ClusterIssuer struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec IssuerSpec `json:"spec,omitempty"` Status IssuerStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterIssuerList is a list of Issuers type ClusterIssuerList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` Items []ClusterIssuer `json:"items"` } // +genclient // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +resource:path=issuers type Issuer struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec IssuerSpec `json:"spec,omitempty"` Status IssuerStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // IssuerList is a list of Issuers type IssuerList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` Items []Issuer `json:"items"` } // IssuerSpec is the specification of an Issuer. This includes any // configuration required for the issuer. type IssuerSpec struct { IssuerConfig `json:",inline"` } type IssuerConfig struct { ACME *ACMEIssuer `json:"acme,omitempty"` CA *CAIssuer `json:"ca,omitempty"` Vault *VaultIssuer `json:"vault,omitempty"` } type VaultIssuer struct { // Vault authentication Auth VaultAuth `json:"auth"` // Server is the vault connection address Server string `json:"server"` // Vault URL path to the certificate role Path string `json:"path"` } // Vault authentication can be configured: // - With a secret containing a token. Cert-manager is using this token as-is. // - With a secret containing a AppRole. This AppRole is used to authenticate to // Vault and retrieve a token. type VaultAuth struct { // This Secret contains the Vault token key TokenSecretRef SecretKeySelector `json:"tokenSecretRef,omitempty"` // This Secret contains a AppRole and Secret AppRole VaultAppRole `json:"appRole,omitempty"` } type VaultAppRole struct { RoleId string `json:"roleId"` SecretRef SecretKeySelector `json:"secretRef"` } type CAIssuer struct { // SecretName is the name of the secret used to sign Certificates issued // by this Issuer. 
SecretName string `json:"secretName"` } // ACMEIssuer contains the specification for an ACME issuer type ACMEIssuer struct { // Email is the email for this account Email string `json:"email"` // Server is the ACME server URL Server string `json:"server"` // If true, skip verifying the ACME server TLS certificate SkipTLSVerify bool `json:"skipTLSVerify,omitempty"` // PrivateKey is the name of a secret containing the private key for this // user account. PrivateKey SecretKeySelector `json:"privateKeySecretRef"` // HTTP01 config HTTP01 *ACMEIssuerHTTP01Config `json:"http01,omitempty"` // DNS-01 config DNS01 *ACMEIssuerDNS01Config `json:"dns01,omitempty"` } type ACMEIssuerHTTP01Config struct { } // ACMEIssuerDNS01Config is a structure containing the ACME DNS configuration // options type ACMEIssuerDNS01Config struct { Providers []ACMEIssuerDNS01Provider `json:"providers"` } type ACMEIssuerDNS01Provider struct { Name string `json:"name"` Akamai *ACMEIssuerDNS01ProviderAkamai `json:"akamai,omitempty"` CloudDNS *ACMEIssuerDNS01ProviderCloudDNS `json:"clouddns,omitempty"` Cloudflare *ACMEIssuerDNS01ProviderCloudflare `json:"cloudflare,omitempty"` Route53 *ACMEIssuerDNS01ProviderRoute53 `json:"route53,omitempty"` AzureDNS *ACMEIssuerDNS01ProviderAzureDNS `json:"azuredns,omitempty"` } // ACMEIssuerDNS01ProviderAkamai is a structure containing the DNS // configuration for Akamai DNS—Zone Record Management API type ACMEIssuerDNS01ProviderAkamai struct { ServiceConsumerDomain string `json:"serviceConsumerDomain"` ClientToken SecretKeySelector `json:"clientTokenSecretRef"` ClientSecret SecretKeySelector `json:"clientSecretSecretRef"` AccessToken SecretKeySelector `json:"accessTokenSecretRef"` } // ACMEIssuerDNS01ProviderCloudDNS is a structure containing the DNS // configuration for Google Cloud DNS type ACMEIssuerDNS01ProviderCloudDNS struct { ServiceAccount SecretKeySelector `json:"serviceAccountSecretRef"` Project string `json:"project"` } // ACMEIssuerDNS01ProviderCloudflare is a structure containing the DNS // configuration for Cloudflare type ACMEIssuerDNS01ProviderCloudflare struct { Email string `json:"email"` APIKey SecretKeySelector `json:"apiKeySecretRef"` } // ACMEIssuerDNS01ProviderRoute53 is a structure containing the Route 53 // configuration for AWS type ACMEIssuerDNS01ProviderRoute53 struct { AccessKeyID string `json:"accessKeyID"` SecretAccessKey SecretKeySelector `json:"secretAccessKeySecretRef"` HostedZoneID string `json:"hostedZoneID"` Region string `json:"region"` } // ACMEIssuerDNS01ProviderAzureDNS is a structure containing the // configuration for Azure DNS type ACMEIssuerDNS01ProviderAzureDNS struct { ClientID string `json:"clientID"` ClientSecret SecretKeySelector `json:"clientSecretSecretRef"` SubscriptionID string `json:"subscriptionID"` TenantID string `json:"tenantID"` ResourceGroupName string `json:"resourceGroupName"` // + optional HostedZoneName string `json:"hostedZoneName"` } // IssuerStatus contains status information about an Issuer type IssuerStatus struct { Conditions []IssuerCondition `json:"conditions"` ACME *ACMEIssuerStatus `json:"acme,omitempty"` } // IssuerCondition contains condition information for an Issuer. type IssuerCondition struct { // Type of the condition, currently ('Ready'). Type IssuerConditionType `json:"type"` // Status of the condition, one of ('True', 'False', 'Unknown'). Status ConditionStatus `json:"status"` // LastTransitionTime is the timestamp corresponding to the last status // change of this condition. 
LastTransitionTime metav1.Time `json:"lastTransitionTime"` // Reason is a brief machine readable explanation for the condition's last // transition. Reason string `json:"reason"` // Message is a human readable description of the details of the last // transition, complementing reason. Message string `json:"message"` } // IssuerConditionType represents an Issuer condition value. type IssuerConditionType string const ( // IssuerConditionReady represents the fact that a given Issuer condition // is in ready state. IssuerConditionReady IssuerConditionType = "Ready" ) // ConditionStatus represents a condition's status. type ConditionStatus string // These are valid condition statuses. "ConditionTrue" means a resource is in // the condition; "ConditionFalse" means a resource is not in the condition; // "ConditionUnknown" means kubernetes can't decide if a resource is in the // condition or not. In the future, we could add other intermediate // conditions, e.g. ConditionDegraded. const ( // ConditionTrue represents the fact that a given condition is true ConditionTrue ConditionStatus = "True" // ConditionFalse represents the fact that a given condition is false ConditionFalse ConditionStatus = "False" // ConditionUnknown represents the fact that a given condition is unknown ConditionUnknown ConditionStatus = "Unknown" ) type ACMEIssuerStatus struct { // URI is the unique account identifier, which can also be used to retrieve // account details from the CA URI string `json:"uri"` } // +genclient // +k8s:openapi-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +resource:path=certificates // Certificate is a type to represent a Certificate from ACME type Certificate struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec CertificateSpec `json:"spec,omitempty"` Status CertificateStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CertificateList is a list of Certificates type CertificateList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` Items []Certificate `json:"items"` } // CertificateSpec defines the desired state of Certificate type CertificateSpec struct { // CommonName is a common name to be used on the Certificate CommonName string `json:"commonName"` // DNSNames is a list of subject alt names to be used on the Certificate DNSNames []string `json:"dnsNames"` // SecretName is the name of the secret resource to store this secret in SecretName string `json:"secretName"` // IssuerRef is a reference to the issuer for this certificate. If the // namespace field is not set, it is assumed to be in the same namespace // as the certificate. If the namespace field is set to the empty value "", // a ClusterIssuer of the given name will be used. Any other value is // invalid. 
IssuerRef ObjectReference `json:"issuerRef"` ACME *ACMECertificateConfig `json:"acme,omitempty"` } // ACMEConfig contains the configuration for the ACME certificate provider type ACMECertificateConfig struct { Config []ACMECertificateDomainConfig `json:"config"` } type ACMECertificateDomainConfig struct { Domains []string `json:"domains"` ACMESolverConfig `json:",inline"` } type ACMESolverConfig struct { HTTP01 *ACMECertificateHTTP01Config `json:"http01,omitempty"` DNS01 *ACMECertificateDNS01Config `json:"dns01,omitempty"` } type ACMECertificateHTTP01Config struct { Ingress string `json:"ingress"` IngressClass *string `json:"ingressClass,omitempty"` } type ACMECertificateDNS01Config struct { Provider string `json:"provider"` } // CertificateStatus defines the observed state of Certificate type CertificateStatus struct { Conditions []CertificateCondition `json:"conditions,omitempty"` ACME *CertificateACMEStatus `json:"acme,omitempty"` } // CertificateCondition contains condition information for an Certificate. type CertificateCondition struct { // Type of the condition, currently ('Ready'). Type CertificateConditionType `json:"type"` // Status of the condition, one of ('True', 'False', 'Unknown'). Status ConditionStatus `json:"status"` // LastTransitionTime is the timestamp corresponding to the last status // change of this condition. LastTransitionTime metav1.Time `json:"lastTransitionTime"` // Reason is a brief machine readable explanation for the condition's last // transition. Reason string `json:"reason"` // Message is a human readable description of the details of the last // transition, complementing reason. Message string `json:"message"` } // CertificateConditionType represents an Certificate condition value. type CertificateConditionType string const ( // CertificateConditionReady represents the fact that a given Certificate condition // is in ready state. CertificateConditionReady CertificateConditionType = "Ready" // CertificateConditionValidationFailed is used to indicate whether a // validation for a Certificate has failed. // This is currently used by the ACME issuer to track when the last // validation was attempted. CertificateConditionValidationFailed CertificateConditionType = "ValidateFailed" ) // CertificateACMEStatus holds the status for an ACME issuer type CertificateACMEStatus struct { // Order contains details about the current in-progress ACME Order. Order ACMEOrderStatus `json:"order,omitempty"` } type ACMEOrderStatus struct { // The URL that can be used to get information about the ACME order. URL string `json:"url"` Challenges []ACMEOrderChallenge `json:"challenges,omitempty"` } type ACMEOrderChallenge struct { // The URL that can be used to get information about the ACME challenge. URL string `json:"url"` // The URL that can be used to get information about the ACME authorization // associated with the challenge. AuthzURL string `json:"authzURL"` // Type of ACME challenge // Either http-01 or dns-01 Type string `json:"type"` // Domain this challenge corresponds to Domain string `json:"domain"` // Challenge token for this challenge Token string `json:"token"` // Challenge key for this challenge Key string `json:"key"` // Set to true if this challenge is for a wildcard domain Wildcard bool `json:"wildcard"` // Configuration used to present this challenge ACMESolverConfig `json:",inline"` } type LocalObjectReference struct { // Name of the referent. 
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names // TODO: Add other useful fields. apiVersion, kind, uid? Name string `json:"name,omitempty"` } // ObjectReference is a reference to an object. If the namespace field is set, // it is assumed to be in a namespace type ObjectReference struct { Name string `json:"name"` Kind string `json:"kind,omitempty"` } const ( ClusterIssuerKind = "ClusterIssuer" IssuerKind = "Issuer" ) type SecretKeySelector struct { // The name of the secret in the pod's namespace to select from. LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` }
1
11,039
I think we want to remove `omitempty` here
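The `omitempty` being flagged presumably refers to the `accessKeyIDSecretRef` tag added in the patch: encoding/json never considers a non-pointer struct value empty, so `omitempty` on a `SecretKeySelector` field is a no-op that only misleads readers. A minimal, runnable sketch of that behavior (types trimmed to the relevant fields, not the full cert-manager definitions):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for the SecretKeySelector type above.
type secretKeySelector struct {
	Name string `json:"name,omitempty"`
	Key  string `json:"key,omitempty"`
}

type route53 struct {
	AccessKeyID    string            `json:"accessKeyID,omitempty"`
	AccessKeyIDRef secretKeySelector `json:"accessKeyIDSecretRef,omitempty"`
}

func main() {
	out, _ := json.Marshal(route53{})
	// Prints {"accessKeyIDSecretRef":{}}: the empty string is omitted,
	// but the struct field is emitted despite its omitempty tag.
	fmt.Println(string(out))
}

If omission were actually wanted, the field would have to become a pointer (*SecretKeySelector).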
jetstack-cert-manager
go
@@ -129,6 +129,8 @@ func startContainer(context *cli.Context, spec *specs.LinuxSpec) (int, error) { if err != nil { return -1, err } + handler := newSignalHandler(tty) + defer handler.Close() if err := container.Start(process); err != nil { return -1, err }
1
// +build linux package main import ( "fmt" "os" "runtime" "syscall" "github.com/Sirupsen/logrus" "github.com/codegangsta/cli" "github.com/coreos/go-systemd/activation" "github.com/opencontainers/runc/libcontainer" "github.com/opencontainers/specs" ) // default action is to start a container var startCommand = cli.Command{ Name: "start", Usage: "create and run a container", Flags: []cli.Flag{ cli.StringFlag{ Name: "bundle, b", Value: "", Usage: "path to the root of the bundle directory", }, cli.StringFlag{ Name: "console", Value: "", Usage: "specify the pty slave path for use with the container", }, cli.BoolFlag{ Name: "detach,d", Usage: "detach from the container's process", }, cli.StringFlag{ Name: "pid-file", Value: "", Usage: "specify the file to write the process id to", }, }, Action: func(context *cli.Context) { bundle := context.String("bundle") if bundle != "" { if err := os.Chdir(bundle); err != nil { fatal(err) } } spec, err := loadSpec(specConfig) if err != nil { fatal(err) } notifySocket := os.Getenv("NOTIFY_SOCKET") if notifySocket != "" { setupSdNotify(spec, notifySocket) } if os.Geteuid() != 0 { logrus.Fatal("runc should be run as root") } status, err := startContainer(context, spec) if err != nil { logrus.Fatalf("Container start failed: %v", err) } // exit with the container's exit status so any external supervisor is // notified of the exit with the correct exit status. os.Exit(status) }, } func init() { if len(os.Args) > 1 && os.Args[1] == "init" { runtime.GOMAXPROCS(1) runtime.LockOSThread() factory, _ := libcontainer.New("") if err := factory.StartInitialization(); err != nil { fatal(err) } panic("--this line should have never been executed, congratulations--") } } func startContainer(context *cli.Context, spec *specs.LinuxSpec) (int, error) { id := context.Args().First() if id == "" { return -1, errEmptyID } config, err := createLibcontainerConfig(id, spec) if err != nil { return -1, err } if _, err := os.Stat(config.Rootfs); err != nil { if os.IsNotExist(err) { return -1, fmt.Errorf("rootfs (%q) does not exist", config.Rootfs) } return -1, err } rootuid, err := config.HostUID() if err != nil { return -1, err } factory, err := loadFactory(context) if err != nil { return -1, err } container, err := factory.Create(id, config) if err != nil { return -1, err } // ensure that the container is always removed if we were the process // that created it. detach := context.Bool("detach") if !detach { defer destroy(container) } process := newProcess(spec.Process) // Support on-demand socket activation by passing file descriptors into the container init process. if os.Getenv("LISTEN_FDS") != "" { listenFds := activation.Files(false) if len(listenFds) > 0 { process.Env = append(process.Env, fmt.Sprintf("LISTEN_FDS=%d", len(listenFds)), "LISTEN_PID=1") process.ExtraFiles = append(process.ExtraFiles, listenFds...) } } tty, err := setupIO(process, rootuid, context.String("console"), spec.Process.Terminal, detach) if err != nil { return -1, err } if err := container.Start(process); err != nil { return -1, err } if pidFile := context.String("pid-file"); pidFile != "" { if err := createPidFile(pidFile, process); err != nil { process.Signal(syscall.SIGKILL) process.Wait() return -1, err } } if detach { return 0, nil } handler := newSignalHandler(tty) defer handler.Close() return handler.forward(process) }
1
9,732
You cannot move this here because it breaks detach. Just call `tty.Close()` before returning the error from start.
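A minimal sketch of the fix the reviewer is asking for (close the pty on the error path instead of hoisting the signal handler, which would break the detach branch below):

	if err := container.Start(process); err != nil {
		// Release the tty here; constructing the signal handler earlier
		// would also run on the detach path, where it must not exist yet.
		tty.Close()
		return -1, err
	}

This keeps `newSignalHandler(tty)` where it was in the original file, after the `detach` early return.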
opencontainers-runc
go
@@ -376,8 +376,7 @@ lookupTables.role = { one: ['rowgroup', 'row'] }, nameFrom: ['author'], - context: null, - implicit: ['table'] + context: null }, 'gridcell': { type: 'widget',
1
/** * Namespace for aria-related utilities. * @namespace commons.aria * @memberof axe */ var aria = commons.aria = {}, lookupTables = aria._lut = {}; lookupTables.attributes = { 'aria-activedescendant': { type: 'idref' }, 'aria-atomic': { type: 'boolean', values: ['true', 'false'] }, 'aria-autocomplete': { type: 'nmtoken', values: ['inline', 'list', 'both', 'none'] }, 'aria-busy': { type: 'boolean', values: ['true', 'false'] }, 'aria-checked': { type: 'nmtoken', values: ['true', 'false', 'mixed', 'undefined'] }, 'aria-colcount': { type: 'int' }, 'aria-colindex': { type: 'int' }, 'aria-colspan': { type: 'int' }, 'aria-controls': { type: 'idrefs' }, 'aria-current': { type: 'nmtoken', values: ['page', 'step', 'location', 'date', 'time', 'true', 'false'] }, 'aria-describedby': { type: 'idrefs' }, 'aria-disabled': { type: 'boolean', values: ['true', 'false'] }, 'aria-dropeffect': { type: 'nmtokens', values: ['copy', 'move', 'reference', 'execute', 'popup', 'none'] }, 'aria-errormessage': { type: 'idref' }, 'aria-expanded': { type: 'nmtoken', values: ['true', 'false', 'undefined'] }, 'aria-flowto': { type: 'idrefs' }, 'aria-grabbed': { type: 'nmtoken', values: ['true', 'false', 'undefined'] }, 'aria-haspopup': { type: 'nmtoken', values: ['true', 'false', 'menu', 'listbox', 'tree', 'grid', 'dialog'] }, 'aria-hidden': { type: 'boolean', values: ['true', 'false'] }, 'aria-invalid': { type: 'nmtoken', values: ['true', 'false', 'spelling', 'grammar'] }, 'aria-keyshortcuts': { type: 'string' }, 'aria-label': { type: 'string' }, 'aria-labelledby': { type: 'idrefs' }, 'aria-level': { type: 'int' }, 'aria-live': { type: 'nmtoken', values: ['off', 'polite', 'assertive'] }, 'aria-modal': { type: 'boolean', values: ['true', 'false'] }, 'aria-multiline': { type: 'boolean', values: ['true', 'false'] }, 'aria-multiselectable': { type: 'boolean', values: ['true', 'false'] }, 'aria-orientation' : { type : 'nmtoken', values : ['horizontal', 'vertical'] }, 'aria-owns': { type: 'idrefs' }, 'aria-placeholder': { type: 'string' }, 'aria-posinset': { type: 'int' }, 'aria-pressed': { type: 'nmtoken', values: ['true', 'false', 'mixed', 'undefined'] }, 'aria-readonly': { type: 'boolean', values: ['true', 'false'] }, 'aria-relevant': { type: 'nmtokens', values: ['additions', 'removals', 'text', 'all'] }, 'aria-required': { type: 'boolean', values: ['true', 'false'] }, 'aria-rowcount': { type: 'int' }, 'aria-rowindex': { type: 'int' }, 'aria-rowspan': { type: 'int' }, 'aria-selected': { type: 'nmtoken', values: ['true', 'false', 'undefined'] }, 'aria-setsize': { type: 'int' }, 'aria-sort': { type: 'nmtoken', values: ['ascending', 'descending', 'other', 'none'] }, 'aria-valuemax': { type: 'decimal' }, 'aria-valuemin': { type: 'decimal' }, 'aria-valuenow': { type: 'decimal' }, 'aria-valuetext': { type: 'string' } }; lookupTables.globalAttributes = [ 'aria-atomic', 'aria-busy', 'aria-controls', 'aria-current', 'aria-describedby', 'aria-disabled', 'aria-dropeffect', 'aria-flowto', 'aria-grabbed', 'aria-haspopup', 'aria-hidden', 'aria-invalid', 'aria-keyshortcuts', 'aria-label', 'aria-labelledby', 'aria-live', 'aria-owns', 'aria-relevant' ]; lookupTables.role = { 'alert': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'alertdialog': { type: 'widget', attributes: { allowed: ['aria-expanded', 'aria-modal'] }, owned: null, nameFrom: ['author'], context: null }, 'application': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: 
['author'], context: null }, 'article': { type: 'structure', attributes: { allowed: ['aria-expanded', 'aria-posinset', 'aria-setsize'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['article'] }, 'banner': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['header'] }, 'button': { type: 'widget', attributes: { allowed: ['aria-expanded', 'aria-pressed'] }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['button', 'input[type="button"]', 'input[type="image"]', 'input[type="reset"]', 'input[type="submit"]', 'summary'] }, 'cell': { type: 'structure', attributes: { allowed: ['aria-colindex', 'aria-colspan', 'aria-rowindex', 'aria-rowspan'] }, owned: null, nameFrom: ['author', 'contents'], context: ['row'], implicit: ['td', 'th'] }, 'checkbox': { type: 'widget', attributes: { allowed: ['aria-checked'] }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['input[type="checkbox"]'] }, 'columnheader': { type: 'structure', attributes: { allowed: ['aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-rowindex', 'aria-rowspan', 'aria-required', 'aria-readonly', 'aria-selected', 'aria-sort'] }, owned: null, nameFrom: ['author', 'contents'], context: ['row'], implicit: ['th'] }, 'combobox': { type: 'composite', attributes: { allowed: ['aria-expanded', 'aria-autocomplete', 'aria-required', 'aria-activedescendant', 'aria-orientation'] }, owned: { all: ['listbox', 'textbox'] }, nameFrom: ['author'], context: null }, 'command': { nameFrom: ['author'], type: 'abstract' }, 'complementary': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['aside'] }, 'composite': { nameFrom: ['author'], type: 'abstract' }, 'contentinfo': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['footer'] }, 'definition': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['dd', 'dfn'] }, 'dialog': { type: 'widget', attributes: { allowed: ['aria-expanded', 'aria-modal'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['dialog'] }, 'directory': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: null }, 'document': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['body'] }, 'feed': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: { one: ['article'] }, nameFrom: ['author'], context: null }, 'form': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['form'] }, 'grid': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-expanded', 'aria-colcount', 'aria-level', 'aria-multiselectable', 'aria-readonly', 'aria-rowcount'] }, owned: { one: ['rowgroup', 'row'] }, nameFrom: ['author'], context: null, implicit: ['table'] }, 'gridcell': { type: 'widget', attributes: { allowed: ['aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-rowindex', 'aria-rowspan', 'aria-selected', 'aria-readonly', 'aria-required'] }, owned: null, nameFrom: ['author', 'contents'], context: ['row'], implicit: ['td', 'th'] }, 'group': { type: 'structure', attributes: { allowed: ['aria-activedescendant', 'aria-expanded'] }, owned: null, 
nameFrom: ['author'], context: null, implicit: ['details', 'optgroup'] }, 'heading': { type: 'structure', attributes: { allowed: ['aria-level', 'aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['h1', 'h2', 'h3', 'h4', 'h5', 'h6'] }, 'img': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['img'] }, 'input': { nameFrom: ['author'], type: 'abstract' }, 'landmark': { nameFrom: ['author'], type: 'abstract' }, 'link': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['a[href]'] }, 'list': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: { all: ['listitem'] }, nameFrom: ['author'], context: null, implicit: ['ol', 'ul', 'dl'] }, 'listbox': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-multiselectable', 'aria-required', 'aria-expanded', 'aria-orientation'] }, owned: { all: ['option'] }, nameFrom: ['author'], context: null, implicit: ['select'] }, 'listitem': { type: 'structure', attributes: { allowed: ['aria-level', 'aria-posinset', 'aria-setsize', 'aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: ['list'], implicit: ['li', 'dt'] }, 'log': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'main': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['main'] }, 'marquee': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'math': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['math'] }, 'menu': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-expanded', 'aria-orientation'] }, owned: { one: ['menuitem', 'menuitemradio', 'menuitemcheckbox'] }, nameFrom: ['author'], context: null, implicit: ['menu[type="context"]'] }, 'menubar': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-expanded', 'aria-orientation'] }, owned: null, nameFrom: ['author'], context: null }, 'menuitem': { type: 'widget', attributes: { allowed: ['aria-posinset', 'aria-setsize', 'aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: ['menu', 'menubar'], implicit: ['menuitem[type="command"]'] }, 'menuitemcheckbox': { type: 'widget', attributes: { allowed: ['aria-checked', 'aria-posinset', 'aria-setsize'] }, owned: null, nameFrom: ['author', 'contents'], context: ['menu', 'menubar'], implicit: ['menuitem[type="checkbox"]'] }, 'menuitemradio': { type: 'widget', attributes: { allowed: ['aria-checked', 'aria-selected', 'aria-posinset', 'aria-setsize'], }, owned: null, nameFrom: ['author', 'contents'], context: ['menu', 'menubar'], implicit: ['menuitem[type="radio"]'] }, 'navigation': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['nav'] }, 'none': { type: 'structure', attributes: null, owned: null, nameFrom: ['author'], context: null }, 'note': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'option': { type: 'widget', attributes: { allowed: ['aria-selected', 'aria-posinset', 'aria-setsize', 'aria-checked'] }, owned: null, nameFrom: ['author', 'contents'], context: 
['listbox'], implicit: ['option'] }, 'presentation': { type: 'structure', attributes: null, owned: null, nameFrom: ['author'], context: null }, 'progressbar': { type: 'widget', attributes: { allowed: ['aria-valuetext', 'aria-valuenow', 'aria-valuemax', 'aria-valuemin'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['progress'] }, 'radio': { type: 'widget', attributes: { allowed: ['aria-checked', 'aria-selected', 'aria-posinset', 'aria-setsize'], }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['input[type="radio"]'] }, 'radiogroup': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-required', 'aria-expanded'] }, owned: { all: ['radio'] }, nameFrom: ['author'], context: null }, 'range': { nameFrom: ['author'], type: 'abstract' }, 'region': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['section'] }, 'roletype': { type: 'abstract' }, 'row': { type: 'structure', attributes: { allowed: ['aria-activedescendant', 'aria-colindex', 'aria-expanded', 'aria-level', 'aria-selected', 'aria-rowindex'] }, owned: { one: ['cell', 'columnheader', 'rowheader', 'gridcell'] }, nameFrom: ['author', 'contents'], context: ['rowgroup', 'grid', 'treegrid', 'table'], implicit: ['tr'] }, 'rowgroup': { type: 'structure', attributes: { allowed: ['aria-activedescendant', 'aria-expanded'] }, owned: { all: ['row'] }, nameFrom: ['author', 'contents'], context: ['grid', 'table'], implicit: ['tbody', 'thead', 'tfoot'] }, 'rowheader': { type: 'structure', attributes: { allowed: ['aria-colindex', 'aria-colspan', 'aria-expanded', 'aria-rowindex', 'aria-rowspan', 'aria-required', 'aria-readonly', 'aria-selected', 'aria-sort'] }, owned: null, nameFrom: ['author', 'contents'], context: ['row'], implicit: ['th'] }, 'scrollbar': { type: 'widget', attributes: { required: ['aria-controls', 'aria-valuenow', 'aria-valuemax', 'aria-valuemin'], allowed: ['aria-valuetext', 'aria-orientation'] }, owned: null, nameFrom: ['author'], context: null }, 'search': { type: 'landmark', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'searchbox': { type: 'widget', attributes: { allowed: ['aria-activedescendant', 'aria-autocomplete', 'aria-multiline', 'aria-readonly', 'aria-required', 'aria-placeholder'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['input[type="search"]'] }, 'section': { nameFrom: ['author', 'contents'], type: 'abstract' }, 'sectionhead': { nameFrom: ['author', 'contents'], type: 'abstract' }, 'select': { nameFrom: ['author'], type: 'abstract' }, 'separator': { type: 'structure', attributes: { allowed: ['aria-expanded', 'aria-orientation'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['hr'] }, 'slider': { type: 'widget', attributes: { allowed: ['aria-valuetext', 'aria-orientation'], required: ['aria-valuenow', 'aria-valuemax', 'aria-valuemin'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['input[type="range"]'] }, 'spinbutton': { type: 'widget', attributes: { allowed: ['aria-valuetext', 'aria-required'], required: ['aria-valuenow', 'aria-valuemax', 'aria-valuemin'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['input[type="number"]'] }, 'status': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['output'] }, 'structure': { type: 'abstract' }, 'switch': { type: 'widget', attributes: { required: 
['aria-checked'] }, owned: null, nameFrom: ['author', 'contents'], context: null }, 'tab': { type: 'widget', attributes: { allowed: ['aria-selected', 'aria-expanded', 'aria-setsize', 'aria-posinset'] }, owned: null, nameFrom: ['author', 'contents'], context: ['tablist'] }, 'table': { type: 'structure', attributes: { allowed: ['aria-colcount', 'aria-rowcount'] }, owned: { one: ['rowgroup', 'row'] }, nameFrom: ['author'], context: null, implicit: ['table'] }, 'tablist': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-expanded', 'aria-level', 'aria-multiselectable', 'aria-orientation'] }, owned: { all: ['tab'] }, nameFrom: ['author'], context: null }, 'tabpanel': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'term': { type: 'structure', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: null, implicit: ['dt'] }, 'text': { type: 'structure', owned: null, nameFrom: ['author', 'contents'], context: null }, 'textbox': { type: 'widget', attributes: { allowed: ['aria-activedescendant', 'aria-autocomplete', 'aria-multiline', 'aria-readonly', 'aria-required', 'aria-placeholder'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['input[type="text"]', 'input[type="email"]', 'input[type="password"]', 'input[type="tel"]', 'input[type="url"]', 'input:not([type])', 'textarea'] }, 'timer': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author'], context: null }, 'toolbar': { type: 'structure', attributes: { allowed: ['aria-activedescendant', 'aria-expanded'] }, owned: null, nameFrom: ['author'], context: null, implicit: ['menu[type="toolbar"]'] }, 'tooltip': { type: 'widget', attributes: { allowed: ['aria-expanded'] }, owned: null, nameFrom: ['author', 'contents'], context: null }, 'tree': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-multiselectable', 'aria-required', 'aria-expanded', 'aria-orientation'] }, owned: { all: ['treeitem'] }, nameFrom: ['author'], context: null }, 'treegrid': { type: 'composite', attributes: { allowed: ['aria-activedescendant', 'aria-colcount', 'aria-expanded', 'aria-level', 'aria-multiselectable', 'aria-readonly', 'aria-required', 'aria-rowcount', 'aria-orientation'] }, owned: { one: ['rowgroup', 'row'] }, nameFrom: ['author'], context: null }, 'treeitem': { type: 'widget', attributes: { allowed: ['aria-checked', 'aria-selected', 'aria-expanded', 'aria-level', 'aria-posinset', 'aria-setsize'] }, owned: null, nameFrom: ['author', 'contents'], context: ['group', 'tree'] }, 'widget': { type: 'abstract' }, 'window': { nameFrom: ['author'], type: 'abstract' } };
1
11,786
I updated only the implicit roles that needed an update for this new rule to validate.
dequelabs-axe-core
js
@@ -7,11 +7,12 @@ package pb import ( + reflect "reflect" + sync "sync" + proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" ) const (
1
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.22.0 // protoc v3.7.1 // source: payment.proto package pb import ( proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 type Invoice struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields AgreementID uint64 `protobuf:"varint,1,opt,name=AgreementID,proto3" json:"AgreementID,omitempty"` AgreementTotal uint64 `protobuf:"varint,2,opt,name=AgreementTotal,proto3" json:"AgreementTotal,omitempty"` TransactorFee uint64 `protobuf:"varint,3,opt,name=TransactorFee,proto3" json:"TransactorFee,omitempty"` Hashlock string `protobuf:"bytes,4,opt,name=Hashlock,proto3" json:"Hashlock,omitempty"` Provider string `protobuf:"bytes,5,opt,name=Provider,proto3" json:"Provider,omitempty"` } func (x *Invoice) Reset() { *x = Invoice{} if protoimpl.UnsafeEnabled { mi := &file_payment_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Invoice) String() string { return protoimpl.X.MessageStringOf(x) } func (*Invoice) ProtoMessage() {} func (x *Invoice) ProtoReflect() protoreflect.Message { mi := &file_payment_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Invoice.ProtoReflect.Descriptor instead. 
func (*Invoice) Descriptor() ([]byte, []int) { return file_payment_proto_rawDescGZIP(), []int{0} } func (x *Invoice) GetAgreementID() uint64 { if x != nil { return x.AgreementID } return 0 } func (x *Invoice) GetAgreementTotal() uint64 { if x != nil { return x.AgreementTotal } return 0 } func (x *Invoice) GetTransactorFee() uint64 { if x != nil { return x.TransactorFee } return 0 } func (x *Invoice) GetHashlock() string { if x != nil { return x.Hashlock } return "" } func (x *Invoice) GetProvider() string { if x != nil { return x.Provider } return "" } type ExchangeMessage struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Promise *Promise `protobuf:"bytes,1,opt,name=Promise,proto3" json:"Promise,omitempty"` AgreementID uint64 `protobuf:"varint,2,opt,name=AgreementID,proto3" json:"AgreementID,omitempty"` AgreementTotal uint64 `protobuf:"varint,3,opt,name=AgreementTotal,proto3" json:"AgreementTotal,omitempty"` Provider string `protobuf:"bytes,4,opt,name=Provider,proto3" json:"Provider,omitempty"` Signature string `protobuf:"bytes,5,opt,name=Signature,proto3" json:"Signature,omitempty"` HermesID string `protobuf:"bytes,6,opt,name=HermesID,proto3" json:"HermesID,omitempty"` } func (x *ExchangeMessage) Reset() { *x = ExchangeMessage{} if protoimpl.UnsafeEnabled { mi := &file_payment_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ExchangeMessage) String() string { return protoimpl.X.MessageStringOf(x) } func (*ExchangeMessage) ProtoMessage() {} func (x *ExchangeMessage) ProtoReflect() protoreflect.Message { mi := &file_payment_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ExchangeMessage.ProtoReflect.Descriptor instead. 
func (*ExchangeMessage) Descriptor() ([]byte, []int) { return file_payment_proto_rawDescGZIP(), []int{1} } func (x *ExchangeMessage) GetPromise() *Promise { if x != nil { return x.Promise } return nil } func (x *ExchangeMessage) GetAgreementID() uint64 { if x != nil { return x.AgreementID } return 0 } func (x *ExchangeMessage) GetAgreementTotal() uint64 { if x != nil { return x.AgreementTotal } return 0 } func (x *ExchangeMessage) GetProvider() string { if x != nil { return x.Provider } return "" } func (x *ExchangeMessage) GetSignature() string { if x != nil { return x.Signature } return "" } func (x *ExchangeMessage) GetHermesID() string { if x != nil { return x.HermesID } return "" } type Promise struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields ChannelID []byte `protobuf:"bytes,1,opt,name=ChannelID,proto3" json:"ChannelID,omitempty"` Amount uint64 `protobuf:"varint,2,opt,name=Amount,proto3" json:"Amount,omitempty"` Fee uint64 `protobuf:"varint,3,opt,name=Fee,proto3" json:"Fee,omitempty"` Hashlock []byte `protobuf:"bytes,4,opt,name=Hashlock,proto3" json:"Hashlock,omitempty"` R []byte `protobuf:"bytes,5,opt,name=R,proto3" json:"R,omitempty"` Signature []byte `protobuf:"bytes,6,opt,name=Signature,proto3" json:"Signature,omitempty"` } func (x *Promise) Reset() { *x = Promise{} if protoimpl.UnsafeEnabled { mi := &file_payment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Promise) String() string { return protoimpl.X.MessageStringOf(x) } func (*Promise) ProtoMessage() {} func (x *Promise) ProtoReflect() protoreflect.Message { mi := &file_payment_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Promise.ProtoReflect.Descriptor instead. 
func (*Promise) Descriptor() ([]byte, []int) { return file_payment_proto_rawDescGZIP(), []int{2} } func (x *Promise) GetChannelID() []byte { if x != nil { return x.ChannelID } return nil } func (x *Promise) GetAmount() uint64 { if x != nil { return x.Amount } return 0 } func (x *Promise) GetFee() uint64 { if x != nil { return x.Fee } return 0 } func (x *Promise) GetHashlock() []byte { if x != nil { return x.Hashlock } return nil } func (x *Promise) GetR() []byte { if x != nil { return x.R } return nil } func (x *Promise) GetSignature() []byte { if x != nil { return x.Signature } return nil } var File_payment_proto protoreflect.FileDescriptor var file_payment_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x22, 0xb1, 0x01, 0x0a, 0x07, 0x49, 0x6e, 0x76, 0x6f, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x24, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x46, 0x65, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x61, 0x73, 0x68, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x48, 0x61, 0x73, 0x68, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x22, 0xd8, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x52, 0x07, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x26, 0x0a, 0x0e, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x41, 0x67, 0x72, 0x65, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x65, 0x72, 0x6d, 0x65, 0x73, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x48, 0x65, 0x72, 0x6d, 0x65, 0x73, 0x49, 0x44, 0x22, 0x99, 0x01, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x6d, 0x69, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 
0x06, 0x41, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x46, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x46, 0x65, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x48, 0x61, 0x73, 0x68, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x48, 0x61, 0x73, 0x68, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x0c, 0x0a, 0x01, 0x52, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x01, 0x52, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x06, 0x5a, 0x04, 0x2e, 0x3b, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_payment_proto_rawDescOnce sync.Once file_payment_proto_rawDescData = file_payment_proto_rawDesc ) func file_payment_proto_rawDescGZIP() []byte { file_payment_proto_rawDescOnce.Do(func() { file_payment_proto_rawDescData = protoimpl.X.CompressGZIP(file_payment_proto_rawDescData) }) return file_payment_proto_rawDescData } var file_payment_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_payment_proto_goTypes = []interface{}{ (*Invoice)(nil), // 0: pb.Invoice (*ExchangeMessage)(nil), // 1: pb.ExchangeMessage (*Promise)(nil), // 2: pb.Promise } var file_payment_proto_depIdxs = []int32{ 2, // 0: pb.ExchangeMessage.Promise:type_name -> pb.Promise 1, // [1:1] is the sub-list for method output_type 1, // [1:1] is the sub-list for method input_type 1, // [1:1] is the sub-list for extension type_name 1, // [1:1] is the sub-list for extension extendee 0, // [0:1] is the sub-list for field type_name } func init() { file_payment_proto_init() } func file_payment_proto_init() { if File_payment_proto != nil { return } if !protoimpl.UnsafeEnabled { file_payment_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Invoice); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_payment_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ExchangeMessage); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_payment_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Promise); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_payment_proto_rawDesc, NumEnums: 0, NumMessages: 3, NumExtensions: 0, NumServices: 0, }, GoTypes: file_payment_proto_goTypes, DependencyIndexes: file_payment_proto_depIdxs, MessageInfos: file_payment_proto_msgTypes, }.Build() File_payment_proto = out.File file_payment_proto_rawDesc = nil file_payment_proto_goTypes = nil file_payment_proto_depIdxs = nil }
1
16,515
> // Code generated by protoc-gen-go. DO NOT EDIT.
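The quoted header is the point: payment.pb.go is protoc-gen-go output, so the import reshuffle in the patch should come from rerunning code generation with the newer toolchain rather than from hand edits. A hedged sketch of how regeneration is typically wired up (the exact flags, and whether this repo uses a go:generate hook at all, are assumptions):

//go:generate protoc --go_out=paths=source_relative:. payment.proto

With a directive like that next to the package, `go generate ./...` rebuilds payment.pb.go and keeps the generated header honest.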
mysteriumnetwork-node
go
@@ -46,6 +46,9 @@ var ( utils.GcloudProdWrapperLatest: "gcloud", utils.GcloudLatestWrapperLatest: "gcloud", } + // Apply this as instance metadata if the OS config agent is not + // supported for the platform or version being imported. + skipOSConfig = map[string]string{"osconfig_not_supported": "true"} ) type ovfInstanceImportTestProperties struct {
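The new skipOSConfig map is plain instance metadata. A hedged sketch of how such a map could be merged into an instance's metadata using the compute API types (appendMetadata is a hypothetical helper; the test suite may apply the map elsewhere):

import compute "google.golang.org/api/compute/v1"

// appendMetadata copies each key/value pair into the instance metadata.
func appendMetadata(md *compute.Metadata, kv map[string]string) {
	for k, v := range kv {
		v := v // take a stable copy; MetadataItems.Value is a *string
		md.Items = append(md.Items, &compute.MetadataItems{Key: k, Value: &v})
	}
}

Something like appendMetadata(instance.Metadata, skipOSConfig) would then run before the import for unsupported platforms.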
1
// Copyright 2019 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package ovfinstanceimporttestsuite contains e2e tests for instance import cli tools package ovfinstanceimporttestsuite import ( "context" "fmt" "io/ioutil" "log" "os" "regexp" "strings" "sync" "time" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/paramhelper" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path" computeUtils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools_e2e_test/common/compute" "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools_e2e_test/common/utils" daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute" "github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/junitxml" testconfig "github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/test_config" ) const ( testSuiteName = "OVFInstanceImportTests" ovaBucket = "compute-image-tools-test-resources" ) var ( cmds = map[utils.CLITestType]string{ utils.Wrapper: "./gce_ovf_import", utils.GcloudProdWrapperLatest: "gcloud", utils.GcloudLatestWrapperLatest: "gcloud", } ) type ovfInstanceImportTestProperties struct { instanceName string isWindows bool expectedStartupOutput string verificationStartupScript string zone string sourceURI string os string machineType string network string subnet string } // TestSuite is image import test suite. 
func TestSuite( ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite, logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp, testProjectConfig *testconfig.Project) { testsMap := map[utils.CLITestType]map[*junitxml.TestCase]func( context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, utils.CLITestType){} testTypes := []utils.CLITestType{ utils.Wrapper, utils.GcloudProdWrapperLatest, utils.GcloudLatestWrapperLatest, } for _, testType := range testTypes { instanceImportUbuntu3DisksTestCase := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Ubuntu 3 disks, one data disk larger than 10GB")) instanceImportCentos68 := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Centos 6.8")) instanceImportWindows2012R2TwoDisks := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Windows 2012 R2 two disks")) instanceImportWindows2016 := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Windows 2016")) instanceImportWindows2008R2FourNICs := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Windows 2008r2 - Four NICs")) instanceImportDebian9 := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Debian 9")) instanceImportUbuntu16FromVirtualBox := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Ubuntu 1604 from Virtualbox")) instanceImportUbuntu16FromAWS := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Ubuntu 1604 from AWS")) instanceImportNetworkSettingsName := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Test network setting (name only)")) instanceImportNetworkSettingsPath := junitxml.NewTestCase( testSuiteName, fmt.Sprintf("[%v][OVFInstanceImport] %v", testType, "Test network setting (path)")) testsMap[testType] = map[*junitxml.TestCase]func( context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, utils.CLITestType){} testsMap[testType][instanceImportUbuntu3DisksTestCase] = runOVFInstanceImportUbuntu3Disks testsMap[testType][instanceImportCentos68] = runOVFInstanceImportCentos68 testsMap[testType][instanceImportWindows2012R2TwoDisks] = runOVFInstanceImportWindows2012R2TwoDisks testsMap[testType][instanceImportWindows2016] = runOVFInstanceImportWindows2016 testsMap[testType][instanceImportWindows2008R2FourNICs] = runOVFInstanceImportWindows2008R2FourNICs testsMap[testType][instanceImportDebian9] = runOVFInstanceImportDebian9 testsMap[testType][instanceImportUbuntu16FromVirtualBox] = runOVFInstanceImportUbuntu16FromVirtualBox testsMap[testType][instanceImportUbuntu16FromAWS] = runOVFInstanceImportUbuntu16FromAWS testsMap[testType][instanceImportNetworkSettingsName] = runOVFInstanceImportNetworkSettingsName testsMap[testType][instanceImportNetworkSettingsPath] = runOVFInstanceImportNetworkSettingsPath } utils.CLITestSuite(ctx, tswg, testSuites, logger, testSuiteRegex, testCaseRegex, testProjectConfig, testSuiteName, testsMap) } func runOVFInstanceImportUbuntu3Disks(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-ubuntu-3-disks-%v", suffix), verificationStartupScript: 
loadScriptContent( "scripts/ovf_import_test_ubuntu_3_disks.sh", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All tests passed!", sourceURI: fmt.Sprintf("gs://%v/ova/ubuntu-1604-three-disks", ovaBucket), os: "ubuntu-1604", machineType: "n1-standard-4"} runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportCentos68(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-centos-6-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.sh", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All tests passed!", sourceURI: fmt.Sprintf("gs://%v/", ovaBucket), os: "centos-6", machineType: "n1-standard-4", } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportWindows2012R2TwoDisks(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-w2k12-r2-%v", suffix), verificationStartupScript: loadScriptContent( "scripts/ovf_import_test_windows_two_disks.ps1", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All Tests Passed", sourceURI: fmt.Sprintf("gs://%v/ova/w2k12-r2", ovaBucket), os: "windows-2012r2", machineType: "n1-standard-8", isWindows: true, } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportWindows2016(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-w2k16-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.ps1", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All Tests Passed", sourceURI: fmt.Sprintf("gs://%v/ova/w2k16/w2k16.ovf", ovaBucket), os: "windows-2016", machineType: "n2-standard-2", isWindows: true, } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportWindows2008R2FourNICs(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-w2k8r2-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.ps1", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All Tests Passed", sourceURI: fmt.Sprintf("gs://%v/ova/win2008r2-all-updates-four-nic.ova", ovaBucket), os: "windows-2008r2", isWindows: true, } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportDebian9(ctx context.Context, testCase *junitxml.TestCase, logger 
*log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) // no startup script as this OVA has issues running it (possibly due to no SSH allowed) // b/141321520 props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-debian-9-%v", suffix), zone: testProjectConfig.TestZone, sourceURI: fmt.Sprintf("gs://%v/ova/bitnami-tomcat-8.5.43-0-linux-debian-9-x86_64.ova", ovaBucket), os: "debian-9", machineType: "n1-standard-4", } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportUbuntu16FromVirtualBox(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-virtualbox-6-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.sh", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All tests passed!", sourceURI: fmt.Sprintf("gs://%v/ova/ubuntu-16.04-virtualbox.ova", ovaBucket), os: "ubuntu-1604", machineType: "n1-standard-4", } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportUbuntu16FromAWS(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-instance-aws-ova-ubuntu-1604-%v", suffix), zone: testProjectConfig.TestZone, sourceURI: fmt.Sprintf("gs://%v/ova/aws-ova-ubuntu-1604.ova", ovaBucket), os: "ubuntu-1604", machineType: "n1-standard-4", } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportNetworkSettingsName(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-network-name-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.sh", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All tests passed!", sourceURI: fmt.Sprintf("gs://%v/", ovaBucket), os: "centos-6", machineType: "n1-standard-4", network: fmt.Sprintf("%v-vpc-1", testProjectConfig.TestProjectID), subnet: fmt.Sprintf("%v-subnet-1", testProjectConfig.TestProjectID), } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func runOVFInstanceImportNetworkSettingsPath(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger, testProjectConfig *testconfig.Project, testType utils.CLITestType) { suffix := path.RandString(5) region, _ := paramhelper.GetRegion(testProjectConfig.TestZone) props := &ovfInstanceImportTestProperties{ instanceName: fmt.Sprintf("test-network-path-%v", suffix), verificationStartupScript: loadScriptContent( "daisy_integration_tests/scripts/post_translate_test.sh", logger), zone: testProjectConfig.TestZone, expectedStartupOutput: "All tests passed!", sourceURI: fmt.Sprintf("gs://%v/", ovaBucket), os: "centos-6", 
machineType: "n1-standard-4", network: fmt.Sprintf("global/networks/%v-vpc-1", testProjectConfig.TestProjectID), subnet: fmt.Sprintf("projects/%v/regions/%v/subnetworks/%v-subnet-1", testProjectConfig.TestProjectID, region, testProjectConfig.TestProjectID), } runOVFInstanceImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props) } func buildTestArgs(props *ovfInstanceImportTestProperties, testProjectConfig *testconfig.Project) map[utils.CLITestType][]string { gcloudArgs := []string{ "beta", "compute", "instances", "import", props.instanceName, "--quiet", "--docker-image-tag=latest", fmt.Sprintf("--project=%v", testProjectConfig.TestProjectID), fmt.Sprintf("--source-uri=%v", props.sourceURI), fmt.Sprintf("--zone=%v", testProjectConfig.TestZone), } wrapperArgs := []string{"-client-id=e2e", fmt.Sprintf("-project=%v", testProjectConfig.TestProjectID), fmt.Sprintf("-instance-names=%s", props.instanceName), fmt.Sprintf("-ovf-gcs-path=%v", props.sourceURI), fmt.Sprintf("-zone=%v", testProjectConfig.TestZone), fmt.Sprintf("-build-id=%v", path.RandString(10)), } if props.os != "" { gcloudArgs = append(gcloudArgs, fmt.Sprintf("--os=%v", props.os)) wrapperArgs = append(wrapperArgs, fmt.Sprintf("-os=%v", props.os)) } if props.machineType != "" { gcloudArgs = append(gcloudArgs, fmt.Sprintf("--machine-type=%v", props.machineType)) wrapperArgs = append(wrapperArgs, fmt.Sprintf("-machine-type=%v", props.machineType)) } if props.network != "" { gcloudArgs = append(gcloudArgs, fmt.Sprintf("--network=%v", props.network)) wrapperArgs = append(wrapperArgs, fmt.Sprintf("-network=%v", props.network)) } if props.subnet != "" { gcloudArgs = append(gcloudArgs, fmt.Sprintf("--subnet=%v", props.subnet)) wrapperArgs = append(wrapperArgs, fmt.Sprintf("-subnet=%v", props.subnet)) } argsMap := map[utils.CLITestType][]string{ utils.Wrapper: wrapperArgs, utils.GcloudProdWrapperLatest: gcloudArgs, utils.GcloudLatestWrapperLatest: gcloudArgs, } return argsMap } func runOVFInstanceImportTest(ctx context.Context, args []string, testType utils.CLITestType, testProjectConfig *testconfig.Project, logger *log.Logger, testCase *junitxml.TestCase, props *ovfInstanceImportTestProperties) { if utils.RunTestForTestType(cmds[testType], args, testType, logger, testCase) { verifyImportedInstance(ctx, testCase, testProjectConfig, logger, props) } } func verifyImportedInstance( ctx context.Context, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project, logger *log.Logger, props *ovfInstanceImportTestProperties) { client, err := daisyCompute.NewClient(ctx) if err != nil { utils.Failure(testCase, logger, fmt.Sprintf("Error creating client: %v", err)) return } logger.Printf("Verifying imported instance...") instance, err := computeUtils.CreateInstanceObject(ctx, testProjectConfig.TestProjectID, props.zone, props.instanceName, props.isWindows) if err != nil { utils.Failure(testCase, logger, fmt.Sprintf("Image '%v' doesn't exist after import: %v", props.instanceName, err)) return } defer func() { logger.Printf("Deleting instance `%v`", props.instanceName) if err := instance.Cleanup(); err != nil { logger.Printf("Instance '%v' failed to clean up: %v", props.instanceName, err) } else { logger.Printf("Instance '%v' cleaned up.", props.instanceName) } }() // The boot disk for a Windows instance must have the WINDOWS GuestOSFeature, // while the boot disk for other operating systems shouldn't have it. 
for _, disk := range instance.Disks { if !disk.Boot { continue } hasWindowsFeature := false for _, feature := range disk.GuestOsFeatures { if "WINDOWS" == feature.Type { hasWindowsFeature = true break } } if props.isWindows && !hasWindowsFeature { testCase.WriteFailure( "Windows boot disk missing WINDOWS GuestOsFeature. Features found=%v", disk.GuestOsFeatures) } else if !props.isWindows && hasWindowsFeature { testCase.WriteFailure( "Non-Windows boot disk includes WINDOWS GuestOsFeature. Features found=%v", disk.GuestOsFeatures) } } if props.machineType != "" && !strings.HasSuffix(instance.MachineType, props.machineType) { testCase.WriteFailure( "Instance machine type `%v` doesn't match the expected machine type `%v`", instance.MachineType, props.machineType) return } if !strings.HasSuffix(instance.Zone, props.zone) { utils.Failure(testCase, logger, fmt.Sprintf("Instance zone `%v` doesn't match requested zone `%v`", instance.Zone, props.zone)) return } logger.Printf("[%v] Stopping instance before restarting with test startup script", props.instanceName) err = client.StopInstance( testProjectConfig.TestProjectID, props.zone, props.instanceName) if err != nil { testCase.WriteFailure("Error stopping imported instance: %v", err) return } if props.verificationStartupScript == "" { logger.Printf("[%v] Will not set test startup script to instance metadata as it's not defined", props.instanceName) return } err = instance.StartWithScriptCode(props.verificationStartupScript) if err != nil { testCase.WriteFailure("Error starting instance `%v` with script: %v", props.instanceName, err) return } logger.Printf("[%v] Waiting for `%v` in instance serial console.", props.instanceName, props.expectedStartupOutput) if err := instance.WaitForSerialOutput( props.expectedStartupOutput, 1, 5*time.Second, 15*time.Minute); err != nil { testCase.WriteFailure("Error during VM validation: %v", err) } } func loadScriptContent(scriptPath string, logger *log.Logger) string { scriptContent, err := ioutil.ReadFile(scriptPath) if err != nil { logger.Printf("Error loading script `%v`: %v", scriptPath, err) os.Exit(1) } return string(scriptContent) }
1
11,954
minor: call it skipOSConfigMetadata; otherwise the name sounds like a boolean
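The naming point is easier to see side by side. A minimal, hypothetical Go sketch — the identifier under review is not visible in this excerpt, so both the function name below and the `"os-config"` metadata key are illustrative, not taken from the patch: a verb-first name reads as the action it performs, while a noun-only name reads as a boolean flag or getter.

```go
// Hypothetical illustration only -- neither the name nor the metadata key
// is taken from the patch, which is not visible in this excerpt.
package main

import "fmt"

// skipOSConfigMetadata is verb-first, so it clearly names an action; a
// noun-only spelling (e.g. osConfigMetadata) would read as a boolean
// flag or a getter instead.
func skipOSConfigMetadata(m map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range m {
		if k != "os-config" { // hypothetical metadata key
			out[k] = v
		}
	}
	return out
}

func main() {
	fmt.Println(skipOSConfigMetadata(map[string]string{
		"os-config": "enabled",
		"zone":      "us-central1-a",
	}))
}
```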
GoogleCloudPlatform-compute-image-tools
go
@@ -56,7 +56,9 @@ public class NSRSS20 extends Namespace {
             boolean validType = false;
             boolean validUrl = !TextUtils.isEmpty(url);
 
-            if (type == null) {
+            if(SyndTypeUtils.enclosureTypeValid(type)) {
+                validType = true;
+            } else {
                 type = SyndTypeUtils.getMimeTypeFromUrl(url);
             }
 
1
package de.danoeh.antennapod.core.syndication.namespace; import android.text.TextUtils; import android.util.Log; import org.xml.sax.Attributes; import de.danoeh.antennapod.core.feed.Feed; import de.danoeh.antennapod.core.feed.FeedImage; import de.danoeh.antennapod.core.feed.FeedItem; import de.danoeh.antennapod.core.feed.FeedMedia; import de.danoeh.antennapod.core.syndication.handler.HandlerState; import de.danoeh.antennapod.core.syndication.util.SyndTypeUtils; import de.danoeh.antennapod.core.util.DateUtils; /** * SAX-Parser for reading RSS-Feeds * * @author daniel * */ public class NSRSS20 extends Namespace { private static final String TAG = "NSRSS20"; private static final String NSTAG = "rss"; private static final String NSURI = ""; public static final String CHANNEL = "channel"; public static final String ITEM = "item"; private static final String GUID = "guid"; private static final String TITLE = "title"; private static final String LINK = "link"; private static final String DESCR = "description"; private static final String PUBDATE = "pubDate"; private static final String ENCLOSURE = "enclosure"; private static final String IMAGE = "image"; private static final String URL = "url"; private static final String LANGUAGE = "language"; private static final String ENC_URL = "url"; private static final String ENC_LEN = "length"; private static final String ENC_TYPE = "type"; @Override public SyndElement handleElementStart(String localName, HandlerState state, Attributes attributes) { if (ITEM.equals(localName)) { state.setCurrentItem(new FeedItem()); state.getItems().add(state.getCurrentItem()); state.getCurrentItem().setFeed(state.getFeed()); } else if (ENCLOSURE.equals(localName)) { String type = attributes.getValue(ENC_TYPE); String url = attributes.getValue(ENC_URL); boolean validType = false; boolean validUrl = !TextUtils.isEmpty(url); if (type == null) { type = SyndTypeUtils.getMimeTypeFromUrl(url); } if(SyndTypeUtils.enclosureTypeValid(type)) { validType = true; } if (state.getCurrentItem() != null && state.getCurrentItem().getMedia() == null && validType && validUrl) { long size = 0; try { size = Long.parseLong(attributes.getValue(ENC_LEN)); if(size < 16384) { // less than 16kb is suspicious, check manually size = 0; } } catch (NumberFormatException e) { Log.d(TAG, "Length attribute could not be parsed."); } FeedMedia media = new FeedMedia(state.getCurrentItem(), url, size, type); state.getCurrentItem().setMedia(media); } } else if (IMAGE.equals(localName)) { if (state.getTagstack().size() >= 1) { String parent = state.getTagstack().peek().getName(); if (CHANNEL.equals(parent)) { Feed feed = state.getFeed(); if(feed != null && feed.getImage() == null) { feed.setImage(new FeedImage()); feed.getImage().setOwner(state.getFeed()); } } } } return new SyndElement(localName, this); } @Override public void handleElementEnd(String localName, HandlerState state) { if (ITEM.equals(localName)) { if (state.getCurrentItem() != null) { FeedItem currentItem = state.getCurrentItem(); // the title tag is optional in RSS 2.0. The description is used // as a // title if the item has no title-tag. 
if (currentItem.getTitle() == null) { currentItem.setTitle(currentItem.getDescription()); } if (state.getTempObjects().containsKey(NSITunes.DURATION)) { if (currentItem.hasMedia()) { Integer duration = (Integer) state.getTempObjects().get(NSITunes.DURATION); currentItem.getMedia().setDuration(duration); } state.getTempObjects().remove(NSITunes.DURATION); } } state.setCurrentItem(null); } else if (state.getTagstack().size() >= 2 && state.getContentBuf() != null) { String content = state.getContentBuf().toString(); SyndElement topElement = state.getTagstack().peek(); String top = topElement.getName(); SyndElement secondElement = state.getSecondTag(); String second = secondElement.getName(); String third = null; if (state.getTagstack().size() >= 3) { third = state.getThirdTag().getName(); } if (GUID.equals(top) && ITEM.equals(second)) { // some feed creators include an empty or non-standard guid-element in their feed, which should be ignored if (!TextUtils.isEmpty(content) && state.getCurrentItem() != null) { state.getCurrentItem().setItemIdentifier(content); } } else if (TITLE.equals(top)) { String title = content.trim(); if (ITEM.equals(second) && state.getCurrentItem() != null) { state.getCurrentItem().setTitle(title); } else if (CHANNEL.equals(second) && state.getFeed() != null) { state.getFeed().setTitle(title); } else if (IMAGE.equals(second) && CHANNEL.equals(third)) { if(state.getFeed() != null && state.getFeed().getImage() != null && state.getFeed().getImage().getTitle() == null) { state.getFeed().getImage().setTitle(title); } } } else if (LINK.equals(top)) { if (CHANNEL.equals(second) && state.getFeed() != null) { state.getFeed().setLink(content); } else if (ITEM.equals(second) && state.getCurrentItem() != null) { state.getCurrentItem().setLink(content); } } else if (PUBDATE.equals(top) && ITEM.equals(second) && state.getCurrentItem() != null) { state.getCurrentItem().setPubDate(DateUtils.parse(content)); } else if (URL.equals(top) && IMAGE.equals(second) && CHANNEL.equals(third)) { // prefer itunes:image if(state.getFeed() != null && state.getFeed().getImage() != null && state.getFeed().getImage().getDownload_url() == null) { state.getFeed().getImage().setDownload_url(content); } } else if (DESCR.equals(localName)) { if (CHANNEL.equals(second) && state.getFeed() != null) { state.getFeed().setDescription(content); } else if (ITEM.equals(second) && state.getCurrentItem() != null) { state.getCurrentItem().setDescription(content); } } else if (LANGUAGE.equals(localName) && state.getFeed() != null) { state.getFeed().setLanguage(content.toLowerCase()); } } } }
1
13,454
~~I think lines 65-67 should be moved into this else clause, after `type = ...`. Otherwise it is a bit confusing why we check the enclosure type's validity a second time.~~ Never mind, I will do some refactoring myself. But thanks for "fixing" this!
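For reference, the control flow the patch and the review converge on can be sketched outside the SAX handler. A minimal Go sketch, where `isValidEnclosureType` and `mimeTypeFromURL` are hypothetical stand-ins for `SyndTypeUtils.enclosureTypeValid` and `SyndTypeUtils.getMimeTypeFromUrl`: accept the declared type when it is already valid, and only fall back to guessing from the URL when it is not, so each validity check sits next to the branch that needs it.

```go
// Minimal sketch; the two helpers below are hypothetical stand-ins for
// SyndTypeUtils.enclosureTypeValid / SyndTypeUtils.getMimeTypeFromUrl.
package feed

import (
	"mime"
	"path"
	"strings"
)

// isValidEnclosureType reports whether t looks like a playable media type.
func isValidEnclosureType(t string) bool {
	return strings.HasPrefix(t, "audio/") || strings.HasPrefix(t, "video/")
}

// mimeTypeFromURL guesses a MIME type from the URL's file extension via
// the platform MIME table.
func mimeTypeFromURL(url string) string {
	return mime.TypeByExtension(path.Ext(url))
}

// resolveEnclosureType accepts the declared type when it is already valid
// and only falls back to URL-based guessing otherwise, so validity is
// checked in one obvious place per branch.
func resolveEnclosureType(declared, url string) (string, bool) {
	if isValidEnclosureType(declared) {
		return declared, true
	}
	guessed := mimeTypeFromURL(url)
	return guessed, isValidEnclosureType(guessed)
}
```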
AntennaPod-AntennaPod
java
@@ -239,3 +239,16 @@ func (client *Client) Stop() error {
 
 	return nil
 }
+
+// GetSessions returns all sessions from history
+func (client *Client) GetSessions() (endpoints.SessionsDTO, error) {
+	response, err := client.http.Get("sessions", url.Values{})
+	if err != nil {
+		return endpoints.SessionsDTO{}, err
+	}
+	defer response.Body.Close()
+
+	var sessions endpoints.SessionsDTO
+	err = parseResponseJSON(response, &sessions)
+	return sessions, err
+}
1
/* * Copyright (C) 2017 The "MysteriumNetwork/node" Authors. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package client import ( "errors" "fmt" "net/url" "github.com/mysteriumnetwork/node/tequilapi/endpoints" ) // NewClient returns a new instance of Client func NewClient(ip string, port int) *Client { return &Client{ http: newHTTPClient( fmt.Sprintf("http://%s:%d", ip, port), "[Tequilapi.Client] ", "goclient-v0.1", ), } } // Client is able perform remote requests to Tequilapi server type Client struct { http httpClientInterface } // GetIdentities returns a list of client identities func (client *Client) GetIdentities() (ids []IdentityDTO, err error) { response, err := client.http.Get("identities", url.Values{}) if err != nil { return } defer response.Body.Close() var list IdentityList err = parseResponseJSON(response, &list) return list.Identities, err } // NewIdentity creates a new client identity func (client *Client) NewIdentity(passphrase string) (id IdentityDTO, err error) { payload := struct { Passphrase string `json:"passphrase"` }{ passphrase, } response, err := client.http.Post("identities", payload) if err != nil { return } defer response.Body.Close() err = parseResponseJSON(response, &id) return id, err } // RegisterIdentity registers given identity func (client *Client) RegisterIdentity(address string) (err error) { payload := struct { Registered bool `json:"registered"` }{ true, } response, err := client.http.Put("identities/"+address+"/registration", payload) if err != nil { return } defer response.Body.Close() return nil } // IdentityRegistrationStatus returns information of identity needed to register it on blockchain func (client *Client) IdentityRegistrationStatus(address string) (RegistrationDataDTO, error) { response, err := client.http.Get("identities/"+address+"/registration", url.Values{}) if err != nil { return RegistrationDataDTO{}, err } defer response.Body.Close() status := RegistrationDataDTO{} err = parseResponseJSON(response, &status) return status, err } // Connect initiates a new connection to a host identified by providerID func (client *Client) Connect(consumerID, providerID string, options endpoints.ConnectOptions) (status StatusDTO, err error) { payload := struct { Identity string `json:"consumerId"` ProviderID string `json:"providerId"` Options endpoints.ConnectOptions `json:"connectOptions"` }{ consumerID, providerID, options, } response, err := client.http.Put("connection", payload) var errorMessage struct { Message string `json:"message"` } if err != nil { err = parseResponseJSON(response, &errorMessage) if err != nil { return } err = errors.New(errorMessage.Message) return } defer response.Body.Close() err = parseResponseJSON(response, &status) return status, err } // Disconnect terminates current connection func (client *Client) Disconnect() (err error) { response, err := client.http.Delete("connection", nil) if err != nil { return } defer response.Body.Close() 
return nil } // ConnectionStatistics returns statistics about current connection func (client *Client) ConnectionStatistics() (StatisticsDTO, error) { response, err := client.http.Get("connection/statistics", url.Values{}) if err != nil { return StatisticsDTO{}, err } defer response.Body.Close() var statistics StatisticsDTO err = parseResponseJSON(response, &statistics) return statistics, err } // Status returns connection status func (client *Client) Status() (StatusDTO, error) { response, err := client.http.Get("connection", url.Values{}) if err != nil { return StatusDTO{}, err } defer response.Body.Close() var status StatusDTO err = parseResponseJSON(response, &status) return status, err } // Healthcheck returns a healthcheck info func (client *Client) Healthcheck() (healthcheck HealthcheckDTO, err error) { response, err := client.http.Get("healthcheck", url.Values{}) if err != nil { return } defer response.Body.Close() err = parseResponseJSON(response, &healthcheck) return healthcheck, err } // Proposals returns all available proposals for services func (client *Client) Proposals() ([]ProposalDTO, error) { response, err := client.http.Get("proposals", url.Values{}) if err != nil { return []ProposalDTO{}, err } defer response.Body.Close() var proposals ProposalList err = parseResponseJSON(response, &proposals) return proposals.Proposals, err } // GetIP returns public ip func (client *Client) GetIP() (string, error) { response, err := client.http.Get("connection/ip", url.Values{}) if err != nil { return "", err } defer response.Body.Close() var ipData struct { IP string `json:"ip"` } err = parseResponseJSON(response, &ipData) return ipData.IP, err } // Unlock allows using identity in following commands func (client *Client) Unlock(identity, passphrase string) error { path := fmt.Sprintf("identities/%s/unlock", identity) payload := struct { Passphrase string `json:"passphrase"` }{ passphrase, } response, err := client.http.Put(path, payload) if err != nil { return err } defer response.Body.Close() return nil } // Stop kills mysterium client func (client *Client) Stop() error { emptyPayload := struct{}{} response, err := client.http.Post("/stop", emptyPayload) if err != nil { return err } defer response.Body.Close() return nil }
1
12,518
Declaring `sessions := endpoints.SessionsDTO{}` up front and using only `sessions` on every return path would be simpler.
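Applied to the new method from the patch above, the suggestion looks like this. A sketch only — it assumes the surrounding file's `Client`, `parseResponseJSON`, `endpoints.SessionsDTO`, and the `net/url` import already shown there:

```go
// Sketch of the suggested shape: one zero-valued `sessions` declared up
// front and returned on every path, instead of repeating the literal.
func (client *Client) GetSessions() (endpoints.SessionsDTO, error) {
	sessions := endpoints.SessionsDTO{}
	response, err := client.http.Get("sessions", url.Values{})
	if err != nil {
		return sessions, err
	}
	defer response.Body.Close()

	err = parseResponseJSON(response, &sessions)
	return sessions, err
}
```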
mysteriumnetwork-node
go
@@ -5242,8 +5242,11 @@ master_signal_handler_C(byte *xsp)
     /* Ensure we didn't get the app's sigstack into our frame. On Mac, the kernel
      * doesn't use the frame's uc_stack, so we limit this to Linux.
+     * The pointers may be different if a thread is on its way to exit, and the app's
+     * sigstack was already restored (i#3369).
      */
-    IF_LINUX(ASSERT(dcontext == NULL || dcontext == GLOBAL_DCONTEXT ||
+    IF_LINUX(ASSERT(dcontext == NULL || dcontext->is_exiting ||
+                    dcontext == GLOBAL_DCONTEXT ||
                     frame->uc.uc_stack.ss_sp ==
                         ((thread_sig_info_t *)dcontext->signal_field)->sigstack.ss_sp));
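The pattern in this patch — relax a liveness invariant for threads that are already tearing down, because the state it checks has been legitimately restored — is generic. A minimal Go restatement of the idea, with hypothetical types; this is not DynamoRIO code:

```go
// Hypothetical sketch of the pattern; not DynamoRIO code.
package sketch

// threadState is a made-up stand-in for DynamoRIO's dcontext.
type threadState struct {
	isExiting         bool
	isGlobal          bool
	frameSigstack     uintptr
	installedSigstack uintptr
}

// assertSigstackInvariant mirrors the patched assertion: skip the check
// for a nil/global context or an exiting thread, whose app sigstack has
// already been restored and so may legitimately differ from ours.
func assertSigstackInvariant(t *threadState) {
	if t == nil || t.isExiting || t.isGlobal {
		return // invariant intentionally not enforced during exit
	}
	if t.frameSigstack != t.installedSigstack {
		panic("app sigstack ended up in our signal frame")
	}
}
```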
1
/* ********************************************************** * Copyright (c) 2011-2019 Google, Inc. All rights reserved. * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2000-2001 Hewlett-Packard Company */ /* * signal.c - dynamorio signal handler */ #include <errno.h> #undef errno #include "signal_private.h" /* pulls in globals.h for us, in right order */ /* We want to build on older toolchains so we have our own copy of signal * data structures */ #include "include/siginfo.h" #ifdef LINUX # include "include/sigcontext.h" # include "include/signalfd.h" # include "../globals.h" /* after our sigcontext.h, to preclude bits/sigcontext.h */ #elif defined(MACOS) # include "../globals.h" /* this defines _XOPEN_SOURCE for Mac */ # include <signal.h> /* after globals.h, for _XOPEN_SOURCE from os_exports.h */ #endif #ifdef LINUX # include <linux/sched.h> #endif #include <sys/time.h> #include <sys/types.h> #include <sys/wait.h> #include <ucontext.h> #include <string.h> /* for memcpy and memset */ #include "os_private.h" #include "../fragment.h" #include "../fcache.h" #include "../perfctr.h" #include "arch.h" #include "../monitor.h" /* for trace_abort */ #include "../link.h" /* for linking interrupted fragment_t */ #include "instr.h" /* to find target of SIGSEGV */ #include "decode.h" /* to find target of SIGSEGV */ #include "decode_fast.h" /* to handle self-mod code */ #include "../synch.h" #include "../nudge.h" #include "disassemble.h" #include "ksynch.h" #include "tls.h" /* tls_reinstate_selector */ #include "../translate.h" #ifdef LINUX # include "include/syscall.h" #else # include <sys/syscall.h> #endif #ifdef CLIENT_INTERFACE # include "instrument.h" #endif #ifdef VMX86_SERVER # include <errno.h> #endif /* Define the Linux names, which the code is already using */ #ifndef SA_NOMASK # define SA_NOMASK SA_NODEFER #endif #ifndef SA_ONESHOT # define SA_ONESHOT 
SA_RESETHAND #endif #ifndef SS_AUTODISARM # define SS_AUTODISARM (1U << 31) #endif #ifndef SS_FLAG_BITS # define SS_FLAG_BITS SS_AUTODISARM #endif /**** data structures ***************************************************/ /* The signal numbers are slightly different between operating systems. * To support differing default actions, we have separate arrays, rather * than indirecting to a single all-signals array. */ extern int default_action[]; /* We know that many signals are always asynchronous. * Others, however, may be synchronous or may not -- e.g., another process * could send us a SIGSEGV, and there is no way we can tell whether it * was generated by a real memory fault or not. Thus we have to assume * that we must not delay any SIGSEGV deliveries. */ extern bool can_always_delay[]; static inline bool sig_is_alarm_signal(int sig) { return (sig == SIGALRM || sig == SIGVTALRM || sig == SIGPROF); } /* we do not use SIGSTKSZ b/c for things like code modification * we end up calling many core routines and so want more space * (though currently non-debug stack size == SIGSTKSZ (8KB)) */ #define SIGSTACK_SIZE (DYNAMO_OPTION(signal_stack_size)) /* this flag not defined in our headers */ #define SA_RESTORER 0x04000000 /* if no app sigaction, it's RT, since that's our handler */ #ifdef LINUX # define IS_RT_FOR_APP(info, sig) \ IF_X64_ELSE(true, \ ((info)->app_sigaction[(sig)] == NULL \ ? true \ : (TEST(SA_SIGINFO, (info)->app_sigaction[(sig)]->flags)))) #elif defined(MACOS) # define IS_RT_FOR_APP(info, sig) (true) #endif /* kernel sets size and sp to 0 for SS_DISABLE * when asked, will hand back SS_ONSTACK only if current xsp is inside the * alt stack; otherwise, if an alt stack is registered, it will give flags of 0 * We do not support the "legacy stack switching" that uses the restorer field * as seen in kernel sources. */ #define APP_HAS_SIGSTACK(info) \ ((info)->app_sigstack.ss_sp != NULL && (info)->app_sigstack.ss_flags != SS_DISABLE) /* Under normal circumstances the app_sigaction is lazily initialized when the * app registers a signal handler, but during detach there are points where we * are still intercepting signals after app_sigaction has been set to * zeros. To be extra defensive, we do a NULL check. */ #define USE_APP_SIGSTACK(info, sig) \ (APP_HAS_SIGSTACK(info) && (info)->app_sigaction[sig] != NULL && \ TEST(SA_ONSTACK, (info)->app_sigaction[sig]->flags)) /* If we only intercept a few signals, we leave whether un-intercepted signals * are blocked unchanged and stored in the kernel. If we intercept all (not * quite yet: PR 297033, hence the need for this macro) we emulate the mask for * all. */ #define EMULATE_SIGMASK(info, sig) \ (DYNAMO_OPTION(intercept_all_signals) || (info)->we_intercept[(sig)]) /* i#27: custom data to pass to the child of a clone */ /* PR i#149/403015: clone record now passed via a new dstack */ typedef struct _clone_record_t { byte *dstack; /* dstack for new thread - allocated by parent thread */ #ifdef MACOS /* XXX i#1403: once we have lower-level, earlier thread interception we can * likely switch to something closer to what we do on Linux. * This is used for bsdthread_create, where app_thread_xsp is NULL; * for vfork, app_thread_xsp is non-NULL and this is unused. 
*/ void *thread_arg; #endif reg_t app_thread_xsp; /* app xsp preserved for new thread to use */ app_pc continuation_pc; thread_id_t caller_id; int clone_sysnum; uint clone_flags; thread_sig_info_t info; thread_sig_info_t *parent_info; void *pcprofile_info; #ifdef AARCHXX /* To ensure we have the right value as of the point of the clone, we * store it here (we'll have races if we try to get it during new thread * init). */ reg_t app_stolen_value; # ifndef AARCH64 dr_isa_mode_t isa_mode; # endif /* To ensure we have the right app lib tls base in child thread, * we store it here if necessary (clone w/o CLONE_SETTLS or vfork). */ void *app_lib_tls_base; #endif /* we leave some padding at base of stack for dynamorio_clone * to store values */ reg_t for_dynamorio_clone[4]; } __attribute__((__aligned__(ABI_STACK_ALIGNMENT))) clone_record_t; /* i#350: set up signal handler for safe_read/faults during init */ static thread_sig_info_t init_info; static kernel_sigset_t init_sigmask; #ifdef DEBUG static bool removed_sig_handler; #endif os_cxt_ptr_t osc_empty; /**** function prototypes ***********************************************/ /* in x86.asm */ void master_signal_handler(int sig, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt); static void set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig, kernel_sigaction_t *act); static void intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig); static void signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info); static void signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info, bool other_thread); static bool execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame, sigcontext_t *sc_orig, fragment_t *f _IF_CLIENT(byte *access_address)); static bool execute_handler_from_dispatch(dcontext_t *dcontext, int sig); /* Execute default action from code cache and may terminate the process. * If returns, the return value decides if caller should restore * the untranslated context. 
*/ static bool execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, sigcontext_t *sc_orig, bool forged); static void execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame); static bool handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt); static bool handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt, sigframe_rt_t *frame); static bool handle_nudge_signal(dcontext_t *dcontext, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt); static void init_itimer(dcontext_t *dcontext, bool first); static bool set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info, bool enable); static bool alarm_signal_has_DR_only_itimer(dcontext_t *dcontext, int signal); #ifdef DEBUG static void dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set); #endif static bool is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, kernel_siginfo_t *info); int sigaction_syscall(int sig, kernel_sigaction_t *act, kernel_sigaction_t *oact) { #if !defined(VMX86_SERVER) && defined(LINUX) /* PR 305020: must have SA_RESTORER for x64 */ /* i#2812: must have SA_RESTORER to handle vsyscall32 being disabled */ if (act != NULL && !TEST(SA_RESTORER, act->flags)) { act->flags |= SA_RESTORER; act->restorer = (void (*)(void))dynamorio_sigreturn; } #endif return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigaction, SYS_rt_sigaction), 4, sig, act, oact, sizeof(kernel_sigset_t)); } static inline bool signal_is_interceptable(int sig) { return (sig != SIGKILL && sig != SIGSTOP); } static inline int sigaltstack_syscall(const stack_t *newstack, stack_t *oldstack) { return dynamorio_syscall(SYS_sigaltstack, 2, newstack, oldstack); } static inline int getitimer_syscall(int which, struct itimerval *val) { return dynamorio_syscall(SYS_getitimer, 2, which, val); } static inline int setitimer_syscall(int which, struct itimerval *val, struct itimerval *old) { return dynamorio_syscall(SYS_setitimer, 3, which, val, old); } static inline int sigprocmask_syscall(int how, kernel_sigset_t *set, kernel_sigset_t *oset, size_t sigsetsize) { return dynamorio_syscall(IF_MACOS_ELSE(SYS_sigprocmask, SYS_rt_sigprocmask), 4, how, set, oset, sigsetsize); } void block_all_signals_except(kernel_sigset_t *oset, int num_signals, ... 
/* list of signals */) { kernel_sigset_t set; kernel_sigfillset(&set); va_list ap; va_start(ap, num_signals); for (int i = 0; i < num_signals; ++i) { kernel_sigdelset(&set, va_arg(ap, int)); } va_end(ap); sigprocmask_syscall(SIG_SETMASK, &set, oset, sizeof(set)); } static void unblock_all_signals(kernel_sigset_t *oset) { kernel_sigset_t set; kernel_sigemptyset(&set); sigprocmask_syscall(SIG_SETMASK, &set, oset, sizeof(set)); } /* exported for stackdump.c */ bool set_default_signal_action(int sig) { kernel_sigset_t set; kernel_sigaction_t act; int rc; memset(&act, 0, sizeof(act)); act.handler = (handler_t)SIG_DFL; /* arm the signal */ rc = sigaction_syscall(sig, &act, NULL); DODEBUG({ removed_sig_handler = true; }); /* If we're in our handler now, we have to unblock */ kernel_sigemptyset(&set); kernel_sigaddset(&set, sig); sigprocmask_syscall(SIG_UNBLOCK, &set, NULL, sizeof(set)); return (rc == 0); } static bool set_ignore_signal_action(int sig) { kernel_sigaction_t act; int rc; memset(&act, 0, sizeof(act)); act.handler = (handler_t)SIG_IGN; /* arm the signal */ rc = sigaction_syscall(sig, &act, NULL); return (rc == 0); } /* We assume that signal handlers will be shared most of the time * (pthreads shares them) * Rather than start out with the handler table in local memory and then * having to transfer to global, we just always use global */ static void handler_free(dcontext_t *dcontext, void *p, size_t size) { global_heap_free(p, size HEAPACCT(ACCT_OTHER)); } static void * handler_alloc(dcontext_t *dcontext, size_t size) { return global_heap_alloc(size HEAPACCT(ACCT_OTHER)); } /**** top-level routines ***********************************************/ static bool os_itimers_thread_shared(void) { static bool itimers_shared; static bool cached = false; if (!cached) { file_t f = os_open("/proc/version", OS_OPEN_READ); if (f != INVALID_FILE) { char buf[128]; int major, minor, rel; os_read(f, buf, BUFFER_SIZE_ELEMENTS(buf)); NULL_TERMINATE_BUFFER(buf); if (sscanf(buf, "%*s %*s %d.%d.%d", &major, &minor, &rel) == 3) { /* Linux NPTL in kernel 2.6.12+ has POSIX-style itimers shared * among threads. */ LOG(GLOBAL, LOG_ASYNCH, 1, "kernel version = %d.%d.%d\n", major, minor, rel); itimers_shared = ((major == 2 && minor >= 6 && rel >= 12) || (major >= 3 /* linux-3.0 or above */)); cached = true; } os_close(f); } if (!cached) { /* assume not shared */ itimers_shared = false; cached = true; } LOG(GLOBAL, LOG_ASYNCH, 1, "itimers are %s\n", itimers_shared ? "thread-shared" : "thread-private"); } return itimers_shared; } static void unset_initial_crash_handlers(dcontext_t *dcontext) { ASSERT(init_info.app_sigaction != NULL); signal_info_exit_sigaction(GLOBAL_DCONTEXT, &init_info, false /*!other_thread*/); /* Undo the unblock-all */ sigprocmask_syscall(SIG_SETMASK, &init_sigmask, NULL, sizeof(init_sigmask)); DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "initial app signal mask:\n"); dump_sigset(dcontext, &init_sigmask); }); } void signal_init(void) { kernel_sigset_t set; IF_LINUX(IF_X86_64(ASSERT(ALIGNED(offsetof(sigpending_t, xstate), AVX_ALIGNMENT)))); IF_MACOS(ASSERT(sizeof(kernel_sigset_t) == sizeof(__darwin_sigset_t))); os_itimers_thread_shared(); /* Set up a handler for safe_read (or other fault detection) during * DR init before thread is initialized. * * XXX: could set up a clone_record_t and pass to the initial * signal_thread_inherit() but that would require further code changes. * Could also call signal_thread_inherit to init this, but we don't want * to intercept timer signals, etc. 
before we're ready to handle them, * so we do a partial init. */ signal_info_init_sigaction(GLOBAL_DCONTEXT, &init_info); intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGSEGV); intercept_signal(GLOBAL_DCONTEXT, &init_info, SIGBUS); kernel_sigemptyset(&set); kernel_sigaddset(&set, SIGSEGV); kernel_sigaddset(&set, SIGBUS); sigprocmask_syscall(SIG_UNBLOCK, &set, &init_sigmask, sizeof(set)); IF_LINUX(signalfd_init()); signal_arch_init(); } void signal_exit() { IF_LINUX(signalfd_exit()); #ifdef DEBUG if (stats->loglevel > 0 && (stats->logmask & (LOG_ASYNCH | LOG_STATS)) != 0) { LOG(GLOBAL, LOG_ASYNCH | LOG_STATS, 1, "Total signals delivered: %d\n", GLOBAL_STAT(num_signals)); } #endif } #ifdef HAVE_SIGALTSTACK /* Separated out to run from the dstack (i#2016: see below). */ static void set_our_alt_stack(void *arg) { thread_sig_info_t *info = (thread_sig_info_t *)arg; DEBUG_DECLARE(int rc =) sigaltstack_syscall(&info->sigstack, &info->app_sigstack); ASSERT(rc == 0); } #endif void signal_thread_init(dcontext_t *dcontext, void *os_data) { thread_sig_info_t *info = HEAP_TYPE_ALLOC(dcontext, thread_sig_info_t, ACCT_OTHER, PROTECTED); size_t pend_unit_size = sizeof(sigpending_t) + /* include alignment for xsave on xstate */ signal_frame_extra_size(true) /* sigpending_t has xstate inside it already */ IF_LINUX(IF_X86(-sizeof(kernel_xstate_t))); IF_LINUX(IF_X86(ASSERT(!YMM_ENABLED() || ALIGNED(pend_unit_size, AVX_ALIGNMENT)))); /* all fields want to be initialized to 0 */ memset(info, 0, sizeof(thread_sig_info_t)); dcontext->signal_field = (void *)info; /* our special heap to avoid reentrancy problems * composed entirely of sigpending_t units * Note that it's fine to have the special heap do page-at-a-time * committing, which does not use locks (unless triggers reset!), * but if we need a new unit that will grab a lock: we try to * avoid that by limiting the # of pending alarm signals (PR 596768). */ info->sigheap = special_heap_init_aligned( pend_unit_size, IF_X86_ELSE(AVX_ALIGNMENT, 0), false /* cannot have any locking */, false /* -x */, true /* persistent */, pend_unit_size * DYNAMO_OPTION(max_pending_signals)); #ifdef HAVE_SIGALTSTACK /* set up alternate stack * i#552 we may terminate the process without freeing the stack, so we * stack_alloc it to exempt from the memory leak check. */ info->sigstack.ss_sp = (char *)stack_alloc(SIGSTACK_SIZE, NULL) - SIGSTACK_SIZE; info->sigstack.ss_size = SIGSTACK_SIZE; /* kernel will set xsp to sp+size to grow down from there, we don't have to */ info->sigstack.ss_flags = 0; /* i#2016: for late takeover, this app thread may already be on its own alt * stack. Not setting SA_ONSTACK for SUSPEND_SIGNAL is not sufficient to avoid * this, as our SUSPEND_SIGNAL can interrupt the app inside its own signal * handler. Thus, we simply swap to another stack temporarily to avoid the * kernel complaining. The dstack is set up but it has the clone record and * initial mcxt, so we use the new alt stack. 
*/ call_switch_stack((void *)info, (byte *)info->sigstack.ss_sp + info->sigstack.ss_size, set_our_alt_stack, NULL, true /*return*/); LOG(THREAD, LOG_ASYNCH, 1, "signal stack is " PFX " - " PFX "\n", info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size); /* app_sigstack dealt with below, based on parentage */ #endif kernel_sigemptyset(&info->app_sigblocked); ASSIGN_INIT_LOCK_FREE(info->child_lock, child_lock); /* signal_thread_inherit() finishes per-thread init and is invoked * by os_thread_init_finalize(): we need it after synch_thread_init() and * other post-os_thread_init() setup b/c we can't yet record pending signals, * but we need it before we give up thread_initexit_lock so we can handle * our own suspend signals (i#2779). */ } bool is_thread_signal_info_initialized(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; return info->fully_initialized; } /* i#27: create custom data to pass to the child of a clone * since we can't rely on being able to find the caller, or that * its syscall data is still valid, once in the child. * * i#149/ PR 403015: The clone record is passed to the new thread via the dstack * created for it. Unlike before, where the child thread would create its own * dstack, now the parent thread creates the dstack. Also, switches app stack * to dstack. * * XXX i#1403: for Mac we want to eventually do lower-level earlier interception * of threads, but for now we're later and higher-level, intercepting the user * thread function on the new thread's stack. We ignore app_thread_xsp. */ void * #ifdef MACOS create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp, app_pc thread_func, void *thread_arg) #else create_clone_record(dcontext_t *dcontext, reg_t *app_thread_xsp) #endif { clone_record_t *record; byte *dstack = stack_alloc(DYNAMORIO_STACK_SIZE, NULL); LOG(THREAD, LOG_ASYNCH, 1, "create_clone_record: dstack for new thread is " PFX "\n", dstack); #ifdef MACOS if (app_thread_xsp == NULL) { record = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, clone_record_t, ACCT_THREAD_MGT, true /*prot*/); record->app_thread_xsp = 0; record->continuation_pc = thread_func; record->thread_arg = thread_arg; record->clone_flags = CLONE_THREAD | CLONE_VM | CLONE_SIGHAND | SIGCHLD; } else { #endif /* Note, the stack grows to low memory addr, so dstack points to the high * end of the allocated stack region. So, we must subtract to get space for * the clone record. */ record = (clone_record_t *)(dstack - sizeof(clone_record_t)); ASSERT(ALIGNED(record, get_ABI_stack_alignment())); record->app_thread_xsp = *app_thread_xsp; /* asynch_target is set in dispatch() prior to calling pre_system_call(). */ record->continuation_pc = dcontext->asynch_target; record->clone_flags = dcontext->sys_param0; #ifdef MACOS } #endif LOG(THREAD, LOG_ASYNCH, 1, "allocated clone record: " PFX "\n", record); record->dstack = dstack; record->caller_id = dcontext->owning_thread; record->clone_sysnum = dcontext->sys_num; record->info = *((thread_sig_info_t *)dcontext->signal_field); /* Sigstack is not inherited so clear it now to avoid having to figure out * where it got its value in signal_thread_inherit (i#3116). 
*/ memset(&record->info.app_sigstack, 0, sizeof(record->info.app_sigstack)); record->info.app_sigstack.ss_flags = SS_DISABLE; record->parent_info = (thread_sig_info_t *)dcontext->signal_field; record->pcprofile_info = dcontext->pcprofile_field; #ifdef AARCHXX record->app_stolen_value = get_stolen_reg_val(get_mcontext(dcontext)); # ifndef AARCH64 record->isa_mode = dr_get_isa_mode(dcontext); # endif /* If the child thread shares the same TLS with parent by not setting * CLONE_SETTLS or vfork, we put the TLS base here and clear the * thread register in new_thread_setup, so that DR can distinguish * this case from normal pthread thread creation. */ record->app_lib_tls_base = (!TEST(CLONE_SETTLS, record->clone_flags)) ? os_get_app_tls_base(dcontext, TLS_REG_LIB) : NULL; #endif LOG(THREAD, LOG_ASYNCH, 1, "create_clone_record: thread " TIDFMT ", pc " PFX "\n", record->caller_id, record->continuation_pc); #ifdef MACOS if (app_thread_xsp != NULL) { #endif /* Set the thread stack to point to the dstack, below the clone record. * Note: it's glibc who sets up the arg to the thread start function; * the kernel just does a fork + stack swap, so we can get away w/ our * own stack swap if we restore before the glibc asm code takes over. * We restore this parameter to the app value in * restore_clone_param_from_clone_record(). */ /* i#754: set stack to be XSTATE aligned for saving YMM registers */ ASSERT(ALIGNED(XSTATE_ALIGNMENT, REGPARM_END_ALIGN)); *app_thread_xsp = ALIGN_BACKWARD(record, XSTATE_ALIGNMENT); #ifdef MACOS } #endif return (void *)record; } /* This is to support dr_create_client_thread() */ void set_clone_record_fields(void *record, reg_t app_thread_xsp, app_pc continuation_pc, uint clone_sysnum, uint clone_flags) { clone_record_t *rec = (clone_record_t *)record; ASSERT(rec != NULL); rec->app_thread_xsp = app_thread_xsp; rec->continuation_pc = continuation_pc; rec->clone_sysnum = clone_sysnum; rec->clone_flags = clone_flags; } /* i#149/PR 403015: The clone record is passed to the new thread by placing it * at the bottom of the dstack, i.e., the high memory. So the new thread gets * it from the base of the dstack. The dstack is then set as the app stack. * * CAUTION: don't use a lot of stack in this routine as it gets invoked on the * dstack from new_thread_setup - this is because this routine assumes * no more than a page of dstack has been used so far since the clone * system call was done. */ void * get_clone_record(reg_t xsp) { clone_record_t *record; byte *dstack_base; /* xsp should be in a dstack, i.e., dynamorio heap. */ ASSERT(is_dynamo_address((app_pc)xsp)); /* The (size of the clone record + * stack used by new_thread_start (only for setting up priv_mcontext_t) + * stack used by new_thread_setup before calling get_clone_record()) * is less than a page. This is verified by the assert below. If it does * exceed a page, it won't happen at random during runtime, but in a * predictable way during development, which will be caught by the assert. * The current usage is about 800 bytes for clone_record + * sizeof(priv_mcontext_t) + few words in new_thread_setup before * get_clone_record() is called. */ dstack_base = (byte *)ALIGN_FORWARD(xsp, PAGE_SIZE); record = (clone_record_t *)(dstack_base - sizeof(clone_record_t)); /* dstack_base and the dstack in the clone record should be the same. 
*/ ASSERT(dstack_base == record->dstack); #ifdef MACOS ASSERT(record->app_thread_xsp != 0); /* else it's not in dstack */ #endif return (void *)record; } /* i#149/PR 403015: App xsp is passed to the new thread via the clone record. */ reg_t get_clone_record_app_xsp(void *record) { ASSERT(record != NULL); return ((clone_record_t *)record)->app_thread_xsp; } #ifdef MACOS void * get_clone_record_thread_arg(void *record) { ASSERT(record != NULL); return ((clone_record_t *)record)->thread_arg; } #endif byte * get_clone_record_dstack(void *record) { ASSERT(record != NULL); return ((clone_record_t *)record)->dstack; } #ifdef AARCHXX reg_t get_clone_record_stolen_value(void *record) { ASSERT(record != NULL); return ((clone_record_t *)record)->app_stolen_value; } # ifndef AARCH64 uint /* dr_isa_mode_t but we have a header ordering problem */ get_clone_record_isa_mode(void *record) { ASSERT(record != NULL); return ((clone_record_t *)record)->isa_mode; } # endif void set_thread_register_from_clone_record(void *record) { /* If record->app_lib_tls_base is not NULL, it means the parent * thread did not setup TLS for the child, and we need clear the * thread register. */ if (((clone_record_t *)record)->app_lib_tls_base != NULL) write_thread_register(NULL); } void set_app_lib_tls_base_from_clone_record(dcontext_t *dcontext, void *record) { if (((clone_record_t *)record)->app_lib_tls_base != NULL) { /* child and parent share the same TLS */ os_set_app_tls_base(dcontext, TLS_REG_LIB, ((clone_record_t *)record)->app_lib_tls_base); } } #endif void restore_clone_param_from_clone_record(dcontext_t *dcontext, void *record) { #ifdef LINUX ASSERT(record != NULL); clone_record_t *crec = (clone_record_t *)record; if (crec->clone_sysnum == SYS_clone && TEST(CLONE_VM, crec->clone_flags)) { /* Restore the original stack parameter to the syscall, which we clobbered * in create_clone_record(). Some apps examine it post-syscall (i#3171). */ set_syscall_param(dcontext, SYSCALL_PARAM_CLONE_STACK, get_mcontext(dcontext)->xsp); } #endif } /* Initializes info's app_sigaction, restorer_valid, and we_intercept fields */ static void signal_info_init_sigaction(dcontext_t *dcontext, thread_sig_info_t *info) { info->app_sigaction = (kernel_sigaction_t **)handler_alloc( dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *)); memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *)); memset(&info->restorer_valid, -1, SIGARRAY_SIZE * sizeof(info->restorer_valid[0])); info->we_intercept = (bool *)handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool)); memset(info->we_intercept, 0, SIGARRAY_SIZE * sizeof(bool)); } /* Cleans up info's app_sigaction and we_intercept entries */ static void signal_info_exit_sigaction(dcontext_t *dcontext, thread_sig_info_t *info, bool other_thread) { int i; kernel_sigaction_t act; memset(&act, 0, sizeof(act)); act.handler = (handler_t)SIG_DFL; kernel_sigemptyset(&act.mask); /* does mask matter for SIG_DFL? */ for (i = 1; i <= MAX_SIGNUM; i++) { if (sig_is_alarm_signal(i) && doing_detach && alarm_signal_has_DR_only_itimer(dcontext, i)) { /* We ignore alarms *during* detach in signal_remove_alarm_handlers(), * but to avoid crashing on an alarm arriving post-detach we set to * SIG_IGN if we have an itimer and the app does not (a slight * transparency violation to gain robustness: i#2270). 
*/ set_ignore_signal_action(i); } else if (!other_thread) { if (info->app_sigaction[i] != NULL) { /* Restore to old handler, but not if exiting whole process: * else may get itimer during cleanup, so we set to SIG_IGN. We * do this during detach in signal_remove_alarm_handlers() (and * post-detach above). */ if (dynamo_exited && !doing_detach) { info->app_sigaction[i]->handler = (handler_t)SIG_IGN; } LOG(THREAD, LOG_ASYNCH, 2, "\trestoring " PFX " as handler for %d\n", info->app_sigaction[i]->handler, i); sigaction_syscall(i, info->app_sigaction[i], NULL); } else if (info->we_intercept[i]) { /* restore to default */ LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i); sigaction_syscall(i, &act, NULL); } } if (info->app_sigaction[i] != NULL) { handler_free(dcontext, info->app_sigaction[i], sizeof(kernel_sigaction_t)); } } handler_free(dcontext, info->app_sigaction, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *)); info->app_sigaction = NULL; handler_free(dcontext, info->we_intercept, SIGARRAY_SIZE * sizeof(bool)); info->we_intercept = NULL; } /* Called to finalize per-thread initialization. * Inherited and shared fields are set up here. * The clone_record contains the continuation pc, which is stored in dcontext->next_tag. */ void signal_thread_inherit(dcontext_t *dcontext, void *clone_record) { clone_record_t *record = (clone_record_t *)clone_record; thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; if (record != NULL) { LOG(THREAD, LOG_ASYNCH, 1, "continuation pc is " PFX "\n", record->continuation_pc); dcontext->next_tag = record->continuation_pc; LOG(THREAD, LOG_ASYNCH, 1, "parent tid is " TIDFMT ", parent sysnum is %d(%s), clone flags=" PIFX "\n", record->caller_id, record->clone_sysnum, #ifdef SYS_vfork (record->clone_sysnum == SYS_vfork) ? "vfork" : #endif (IF_LINUX(record->clone_sysnum == SYS_clone ? "clone" :) IF_MACOS( record->clone_sysnum == SYS_bsdthread_create ? "bsdthread_create" :) "unexpected"), record->clone_flags); #ifdef SYS_vfork if (record->clone_sysnum == SYS_vfork) { /* The above clone_flags argument is bogus. SYS_vfork doesn't have a free register to keep the hardcoded value see /usr/src/linux/arch/i386/kernel/process.c */ /* CHECK: is this the only place real clone flags are needed? */ record->clone_flags = CLONE_VFORK | CLONE_VM | SIGCHLD; } #endif /* handlers are either inherited or shared */ if (TEST(CLONE_SIGHAND, record->clone_flags)) { /* need to share table of handlers! 
*/ LOG(THREAD, LOG_ASYNCH, 2, "sharing signal handlers with parent\n"); info->shared_app_sigaction = true; info->shared_refcount = record->info.shared_refcount; info->shared_lock = record->info.shared_lock; info->app_sigaction = record->info.app_sigaction; info->we_intercept = record->info.we_intercept; mutex_lock(info->shared_lock); (*info->shared_refcount)++; #ifdef DEBUG for (i = 1; i <= MAX_SIGNUM; i++) { if (info->app_sigaction[i] != NULL) { LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is " PFX "\n", i, info->app_sigaction[i]->handler); } } #endif mutex_unlock(info->shared_lock); } else { /* copy handlers */ LOG(THREAD, LOG_ASYNCH, 2, "inheriting signal handlers from parent\n"); info->app_sigaction = (kernel_sigaction_t **)handler_alloc( dcontext, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *)); memset(info->app_sigaction, 0, SIGARRAY_SIZE * sizeof(kernel_sigaction_t *)); for (i = 1; i <= MAX_SIGNUM; i++) { info->restorer_valid[i] = -1; /* clear cache */ if (record->info.app_sigaction[i] != NULL) { info->app_sigaction[i] = (kernel_sigaction_t *)handler_alloc( dcontext, sizeof(kernel_sigaction_t)); memcpy(info->app_sigaction[i], record->info.app_sigaction[i], sizeof(kernel_sigaction_t)); LOG(THREAD, LOG_ASYNCH, 2, "\thandler for signal %d is " PFX "\n", i, info->app_sigaction[i]->handler); } } info->we_intercept = (bool *)handler_alloc(dcontext, SIGARRAY_SIZE * sizeof(bool)); memcpy(info->we_intercept, record->info.we_intercept, SIGARRAY_SIZE * sizeof(bool)); mutex_lock(&record->info.child_lock); record->info.num_unstarted_children--; mutex_unlock(&record->info.child_lock); /* this should be safe since parent should wait for us */ mutex_lock(&record->parent_info->child_lock); record->parent_info->num_unstarted_children--; mutex_unlock(&record->parent_info->child_lock); } /* itimers are either private or shared */ if (TEST(CLONE_THREAD, record->clone_flags) && os_itimers_thread_shared()) { ASSERT(record->info.shared_itimer); LOG(THREAD, LOG_ASYNCH, 2, "sharing itimers with parent\n"); info->shared_itimer = true; info->shared_itimer_refcount = record->info.shared_itimer_refcount; info->shared_itimer_underDR = record->info.shared_itimer_underDR; info->itimer = record->info.itimer; atomic_add_exchange_int((volatile int *)info->shared_itimer_refcount, 1); /* shared_itimer_underDR will be incremented in start_itimer() */ } else { info->shared_itimer = false; init_itimer(dcontext, false /*!first thread*/); } /* rest of state is never shared. * app_sigstack should already be in place, when we set up our sigstack * we asked for old sigstack. * FIXME: are current pending or blocked inherited? */ #ifdef MACOS if (record->app_thread_xsp != 0) { HEAP_TYPE_FREE(GLOBAL_DCONTEXT, record, clone_record_t, ACCT_THREAD_MGT, true /*prot*/); } #endif } else { /* Initialize in isolation */ if (APP_HAS_SIGSTACK(info)) { /* parent was NOT under our control, so the real sigstack we see is * a real sigstack that was present before we took control */ LOG(THREAD, LOG_ASYNCH, 1, "app already has signal stack " PFX " - " PFX "\n", info->app_sigstack.ss_sp, info->app_sigstack.ss_sp + info->app_sigstack.ss_size); } signal_info_init_sigaction(dcontext, info); info->shared_itimer = false; /* we'll set to true if a child is created */ init_itimer(dcontext, true /*first*/); /* We split init vs start for the signal handlers and mask. We do not * install ours until we start running the app, to avoid races like * i#2335. We'll set them up when os_process_under_dynamorio_*() invokes * signal_reinstate_handlers(). 
All we do now is mark which signals we * want to intercept. */ if (DYNAMO_OPTION(intercept_all_signals)) { /* PR 304708: to support client signal handlers without * the complexity of per-thread and per-signal callbacks * we always intercept all signals. We also check here * for handlers the app registered before our init. */ for (i = 1; i <= MAX_SIGNUM; i++) { /* cannot intercept KILL or STOP */ if (signal_is_interceptable(i) && /* FIXME PR 297033: we don't support intercepting DEFAULT_STOP / * DEFAULT_CONTINUE signals. Once add support, update * dr_register_signal_event() comments. */ default_action[i] != DEFAULT_STOP && default_action[i] != DEFAULT_CONTINUE) info->we_intercept[i] = true; } } else { /* we intercept the following signals ourselves: */ info->we_intercept[SIGSEGV] = true; /* PR 313665: look for DR crashes on unaligned memory or mmap bounds */ info->we_intercept[SIGBUS] = true; /* PR 212090: the signal we use to suspend threads */ info->we_intercept[SUSPEND_SIGNAL] = true; #ifdef PAPI /* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */ info->we_intercept[SIGPROF] = true; #endif /* vtalarm only used with pc profiling. it interferes w/ PAPI * so arm this signal only if necessary */ if (INTERNAL_OPTION(profile_pcs)) { info->we_intercept[SIGVTALRM] = true; } #ifdef CLIENT_INTERFACE info->we_intercept[SIGALRM] = true; #endif #ifdef SIDELINE info->we_intercept[SIGCHLD] = true; #endif /* i#61/PR 211530: the signal we use for nudges */ info->we_intercept[NUDGESIG_SIGNUM] = true; } /* should be 1st thread */ if (get_num_threads() > 1) ASSERT_NOT_REACHED(); } /* only when SIGVTALRM handler is in place should we start itimer (PR 537743) */ if (INTERNAL_OPTION(profile_pcs)) { /* even if the parent thread exits, we can use a pointer to its * pcprofile_info b/c when shared it's process-shared and is not freed * until the entire process exits */ pcprofile_thread_init(dcontext, info->shared_itimer, (record == NULL) ? NULL : record->pcprofile_info); } info->pre_syscall_app_sigprocmask_valid = false; /* Assumed to be async safe. */ info->fully_initialized = true; } /* When taking over existing app threads, we assume they're using pthreads and * expect to share signal handlers, memory, thread group id, etc. * Invokes dynamo_thread_init() with the appropriate os_data. */ dcontext_t * init_thread_with_shared_siginfo(priv_mcontext_t *mc, dcontext_t *takeover_dc) { clone_record_t crec = { 0, }; thread_sig_info_t *parent_siginfo = (thread_sig_info_t *)takeover_dc->signal_field; /* Create a fake clone record with the given siginfo. All threads in the * same thread group must share signal handlers since Linux 2.5.35, but we * have to guess at the other flags. * FIXME i#764: If we take over non-pthreads threads, we'll need some way to * tell if they're sharing signal handlers or not. 
*/ crec.caller_id = takeover_dc->owning_thread; #ifdef LINUX crec.clone_sysnum = SYS_clone; #else ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#58: NYI on Mac */ #endif crec.clone_flags = PTHREAD_CLONE_FLAGS; crec.parent_info = parent_siginfo; crec.info = *parent_siginfo; crec.pcprofile_info = takeover_dc->pcprofile_field; IF_DEBUG(int r =) dynamo_thread_init(NULL, mc, &crec _IF_CLIENT_INTERFACE(false)); ASSERT(r == SUCCESS); return get_thread_private_dcontext(); } static void free_pending_signal(thread_sig_info_t *info, int sig) { sigpending_t *temp = info->sigpending[sig]; info->sigpending[sig] = temp->next; special_heap_free(info->sigheap, temp); info->num_pending--; } /* This is split from os_fork_init() so the new logfiles are available * (xref i#189/PR 452168). It had to be after dynamo_other_thread_exit() * called in dynamorio_fork_init() after os_fork_init() else we clean * up data structs used in signal_thread_exit(). */ void signal_fork_init(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; /* Child of fork is a single thread in a new process so should * start over w/ no sharing (xref i#190/PR 452178) */ if (info->shared_app_sigaction) { info->shared_app_sigaction = false; if (info->shared_lock != NULL) { DELETE_LOCK(*info->shared_lock); global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER)); } if (info->shared_refcount != NULL) global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER)); info->shared_lock = NULL; info->shared_refcount = NULL; } if (info->shared_itimer) { /* itimers are not inherited across fork */ info->shared_itimer = false; for (i = 0; i < NUM_ITIMERS; i++) DELETE_RECURSIVE_LOCK((*info->itimer)[i].lock); if (os_itimers_thread_shared()) global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); else heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); info->itimer = NULL; /* reset by init_itimer */ ASSERT(info->shared_itimer_refcount != NULL); global_heap_free(info->shared_itimer_refcount, sizeof(int) HEAPACCT(ACCT_OTHER)); info->shared_itimer_refcount = NULL; ASSERT(info->shared_itimer_underDR != NULL); global_heap_free(info->shared_itimer_underDR, sizeof(int) HEAPACCT(ACCT_OTHER)); info->shared_itimer_underDR = NULL; init_itimer(dcontext, true /*first*/); } info->num_unstarted_children = 0; for (i = 1; i <= MAX_SIGNUM; i++) { /* "A child created via fork(2) initially has an empty pending signal set" */ dcontext->signals_pending = 0; while (info->sigpending[i] != NULL) { free_pending_signal(info, i); } info->num_pending = 0; } if (INTERNAL_OPTION(profile_pcs)) { pcprofile_fork_init(dcontext); } info->pre_syscall_app_sigprocmask_valid = false; /* Assumed to be async safe. */ info->fully_initialized = true; } #ifdef DEBUG static bool sigsegv_handler_is_ours(void) { int rc; kernel_sigaction_t oldact; rc = sigaction_syscall(SIGSEGV, NULL, &oldact); return (rc == 0 && oldact.handler == (handler_t)master_signal_handler); } #endif /* DEBUG */ #if defined(X86) && defined(LINUX) static byte * get_xstate_buffer(dcontext_t *dcontext) { /* See thread_sig_info_t.xstate_buf comments for why this is in TLS. 
*/ thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; if (info->xstate_buf == NULL) { info->xstate_alloc = heap_alloc(dcontext, signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER)); info->xstate_buf = (byte *)ALIGN_FORWARD(info->xstate_alloc, XSTATE_ALIGNMENT); ASSERT(info->xstate_alloc + signal_frame_extra_size(true) >= info->xstate_buf + signal_frame_extra_size(false)); } return info->xstate_buf; } #endif void signal_thread_exit(dcontext_t *dcontext, bool other_thread) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; /* i#1012: DR's signal handler should always be installed before this point. */ ASSERT(sigsegv_handler_is_ours() || removed_sig_handler); while (info->num_unstarted_children > 0) { /* must wait for children to start and copy our state * before we destroy it! */ os_thread_yield(); } /* stop_itimer() was already called by os_thread_not_under_dynamo() called * from dynamo_thread_exit_common(). We need to leave the app itimers in place * in case we're detaching. */ #if defined(X86) && defined(LINUX) if (info->xstate_alloc != NULL) { heap_free(dcontext, info->xstate_alloc, signal_frame_extra_size(true) HEAPACCT(ACCT_OTHER)); } #endif /* FIXME: w/ shared handlers, if parent (the owner here) dies, * can children keep living w/ a copy of the handlers? */ if (info->shared_app_sigaction) { mutex_lock(info->shared_lock); (*info->shared_refcount)--; mutex_unlock(info->shared_lock); } if (!info->shared_app_sigaction || *info->shared_refcount == 0) { LOG(THREAD, LOG_ASYNCH, 2, "signal handler cleanup:\n"); signal_info_exit_sigaction(dcontext, info, other_thread); if (info->shared_lock != NULL) { DELETE_LOCK(*info->shared_lock); global_heap_free(info->shared_lock, sizeof(mutex_t) HEAPACCT(ACCT_OTHER)); } if (info->shared_refcount != NULL) global_heap_free(info->shared_refcount, sizeof(int) HEAPACCT(ACCT_OTHER)); } if (info->shared_itimer) { atomic_add_exchange_int((volatile int *)info->shared_itimer_refcount, -1); } if (!info->shared_itimer || *info->shared_itimer_refcount == 0) { if (INTERNAL_OPTION(profile_pcs)) { /* no cleanup needed for non-final thread in group */ pcprofile_thread_exit(dcontext); } for (i = 0; i < NUM_ITIMERS; i++) DELETE_RECURSIVE_LOCK((*info->itimer)[i].lock); if (os_itimers_thread_shared()) global_heap_free(info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); else heap_free(dcontext, info->itimer, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); if (info->shared_itimer_refcount != NULL) { global_heap_free(info->shared_itimer_refcount, sizeof(int) HEAPACCT(ACCT_OTHER)); ASSERT(info->shared_itimer_underDR != NULL); global_heap_free(info->shared_itimer_underDR, sizeof(int) HEAPACCT(ACCT_OTHER)); } } for (i = 1; i <= MAX_SIGNUM; i++) { /* pending queue is per-thread and not shared */ while (info->sigpending[i] != NULL) { sigpending_t *temp = info->sigpending[i]; info->sigpending[i] = temp->next; special_heap_free(info->sigheap, temp); } info->num_pending = 0; } /* If no detach flag is set, we assume that this thread is on its way to exit. * In order to prevent receiving signals while a thread is on its way to exit * without a valid dcontext, signals at this stage are blocked. The exceptions * are the suspend signal and any signal that a terminating SYS_kill may need. * (i#2921). In this case, we do not want to restore the signal mask. For detach, * we do need to restore the app's mask. 
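     *
     * In libc terms, the exit-path mask amounts to roughly the following
     * (a minimal sketch, illustration only; the real code manipulates raw
     * kernel sigsets via direct syscalls):
     *
     *   sigset_t block;
     *   sigfillset(&block);
     *   sigdelset(&block, SUSPEND_SIGNAL); // keep thread suspension working
     *   sigprocmask(SIG_SETMASK, &block, NULL);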
     */
    if (!other_thread && doing_detach)
        signal_swap_mask(dcontext, true /*to_app*/);
#ifdef HAVE_SIGALTSTACK
    /* Remove our sigstack and restore the app sigstack if it had one. */
    if (!other_thread) {
        LOG(THREAD, LOG_ASYNCH, 2, "removing our signal stack " PFX " - " PFX "\n",
            info->sigstack.ss_sp, info->sigstack.ss_sp + info->sigstack.ss_size);
        if (APP_HAS_SIGSTACK(info)) {
            LOG(THREAD, LOG_ASYNCH, 2, "restoring app signal stack " PFX " - " PFX "\n",
                info->app_sigstack.ss_sp,
                info->app_sigstack.ss_sp + info->app_sigstack.ss_size);
        } else {
            ASSERT(TEST(SS_DISABLE, info->app_sigstack.ss_flags));
        }
        if (info->sigstack.ss_sp != NULL) {
            /* i#552: to raise client exit event, we may call dynamo_process_exit
             * on sigstack in signal handler.
             * In that case we set sigstack (ss_sp) NULL to avoid stack swap.
             */
# ifdef MACOS
            if (info->app_sigstack.ss_sp == NULL) {
                /* Kernel fails w/ ENOMEM (even for SS_DISABLE) if ss_size is too small */
                info->sigstack.ss_flags = SS_DISABLE;
                i = sigaltstack_syscall(&info->sigstack, NULL);
                /* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
                ASSERT(i == 0 || i == -EINVAL);
            } else {
                i = sigaltstack_syscall(&info->app_sigstack, NULL);
                /* i#1814: kernel gives EINVAL if last handler didn't call sigreturn! */
                ASSERT(i == 0 || i == -EINVAL);
            }
# else
            i = sigaltstack_syscall(&info->app_sigstack, NULL);
            ASSERT(i == 0);
# endif
        }
    }
#endif
    IF_LINUX(signalfd_thread_exit(dcontext, info));
    special_heap_exit(info->sigheap);
    DELETE_LOCK(info->child_lock);
#ifdef DEBUG
    /* for non-debug we do fast exit path and don't free local heap */
# ifdef HAVE_SIGALTSTACK
    if (info->sigstack.ss_sp != NULL) {
        /* i#552: to raise client exit event, we may call dynamo_process_exit
         * on sigstack in signal handler.
         * In that case we set sigstack (ss_sp) NULL to avoid stack free.
         */
        stack_free(info->sigstack.ss_sp + info->sigstack.ss_size,
                   info->sigstack.ss_size);
    }
# endif
    HEAP_TYPE_FREE(dcontext, info, thread_sig_info_t, ACCT_OTHER, PROTECTED);
#endif
#ifdef PAPI
    /* use SIGPROF for updating gui so it can be distinguished from SIGVTALRM */
    set_itimer_callback(dcontext, ITIMER_PROF, 500,
                        (void (*)(dcontext_t *, priv_mcontext_t *))perfctr_update_gui);
#endif
}

void
set_handler_sigact(kernel_sigaction_t *act, int sig, handler_t handler)
{
    act->handler = handler;
#ifdef MACOS
    /* This is the real target */
    act->tramp = (tramp_t)handler;
#endif
    act->flags = SA_SIGINFO; /* send 3 args to handler */
#ifdef HAVE_SIGALTSTACK
    act->flags |= SA_ONSTACK; /* use our sigstack */
#endif
    /* We want the kernel to help us auto-restart syscalls, esp. when our signals
     * interrupt native code such as during attach or in client or DR code (i#2659).
     */
    act->flags |= SA_RESTART;
#if !defined(VMX86_SERVER) && defined(LINUX)
    /* PR 305020: must have SA_RESTORER for x64 */
    /* i#2812: must have SA_RESTORER to handle vsyscall32 being disabled */
    act->flags |= SA_RESTORER;
    act->restorer = (void (*)(void))dynamorio_sigreturn;
#endif
    /* We block most signals within our handler */
    kernel_sigfillset(&act->mask);
    /* i#184/PR 450670: we let our suspend signal interrupt our own handler
     * We never send more than one before resuming, so no danger to stack usage
     * from our own: but app could pile them up.
     */
    kernel_sigdelset(&act->mask, SUSPEND_SIGNAL);
    /* i#193/PR 287309: we need to NOT suppress further SIGSEGV, for decode faults,
     * for try/except, and for !HAVE_MEMINFO probes.
     * Just like SUSPEND_SIGNAL, if app sends repeated SEGV, could run out of
     * alt stack: seems too corner-case to be worth increasing stack size.
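     *
     * For reference, the SIGSEGV registration built by this routine is
     * roughly equivalent to the following libc-level call (illustrative
     * only; DR fills in a raw kernel_sigaction_t and issues the syscall
     * directly):
     *
     *   struct sigaction lact = { 0 };
     *   lact.sa_sigaction = (void (*)(int, siginfo_t *, void *))master_signal_handler;
     *   lact.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER;
     *   sigfillset(&lact.sa_mask);
     *   sigdelset(&lact.sa_mask, SUSPEND_SIGNAL);
     *   sigdelset(&lact.sa_mask, SIGSEGV);
     *   sigaction(SIGSEGV, &lact, NULL);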
*/ kernel_sigdelset(&act->mask, SIGSEGV); if (sig == SUSPEND_SIGNAL || sig == SIGSEGV) act->flags |= SA_NODEFER; /* Sigset is a 1 or 2 elt array of longs on X64/X86. Treat as 2 elt of * uint32. */ IF_DEBUG(uint32 *mask_sig = (uint32 *)&act->mask.sig[0]); LOG(THREAD_GET, LOG_ASYNCH, 3, "mask for our handler is " PFX " " PFX "\n", mask_sig[0], mask_sig[1]); } static void set_our_handler_sigact(kernel_sigaction_t *act, int sig) { set_handler_sigact(act, sig, (handler_t)master_signal_handler); } static void set_handler_and_record_app(dcontext_t *dcontext, thread_sig_info_t *info, int sig, kernel_sigaction_t *act) { int rc; kernel_sigaction_t oldact; ASSERT(sig <= MAX_SIGNUM); /* arm the signal */ rc = sigaction_syscall(sig, act, &oldact); ASSERT(rc == 0 /* Workaround for PR 223720, which was fixed in ESX4.0 but * is present in ESX3.5 and earlier: vmkernel treats * 63 and 64 as invalid signal numbers. */ IF_VMX86(|| (sig >= 63 && rc == -EINVAL))); if (rc != 0) /* be defensive: app will probably still work */ return; if (oldact.handler != (handler_t)SIG_DFL && oldact.handler != (handler_t)master_signal_handler) { /* save the app's action for sig */ if (info->shared_app_sigaction) { /* app_sigaction structure is shared */ mutex_lock(info->shared_lock); } if (info->app_sigaction[sig] != NULL) { /* go ahead and toss the old one, it's up to the app to store * and then restore later if it wants to */ handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); } info->app_sigaction[sig] = (kernel_sigaction_t *)handler_alloc(dcontext, sizeof(kernel_sigaction_t)); memcpy(info->app_sigaction[sig], &oldact, sizeof(kernel_sigaction_t)); /* clear cache */ info->restorer_valid[sig] = -1; if (info->shared_app_sigaction) mutex_unlock(info->shared_lock); #ifdef DEBUG if (oldact.handler == (handler_t)SIG_IGN) { LOG(THREAD, LOG_ASYNCH, 2, "app already installed SIG_IGN as sigaction for signal %d\n", sig); } else { LOG(THREAD, LOG_ASYNCH, 2, "app already installed " PFX " as sigaction flags=0x%x for signal %d\n", oldact.handler, oldact.flags, sig); } #endif } else { LOG(THREAD, LOG_ASYNCH, 2, "prior handler is " PFX " vs master " PFX " with flags=0x%x for signal %d\n", oldact.handler, master_signal_handler, oldact.flags, sig); if (info->app_sigaction[sig] != NULL) { if (info->shared_app_sigaction) mutex_lock(info->shared_lock); handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); info->app_sigaction[sig] = NULL; if (info->shared_app_sigaction) mutex_unlock(info->shared_lock); } } LOG(THREAD, LOG_ASYNCH, 3, "\twe intercept signal %d\n", sig); } /* Set up master_signal_handler as the handler for signal "sig", * for the current thread. Since we deal with kernel data structures * in our interception of system calls, we use them here as well, * to avoid having to translate to/from libc data structures. 
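 *
 * Illustrative comparison of the two layouts (approximate, x86-64; shown
 * only to motivate skipping the translation):
 *
 *   struct kernel_sigaction {            struct sigaction {  // libc
 *       handler_t handler;                   handler_t sa_handler;
 *       unsigned long flags;                 sigset_t sa_mask;  // 128 bytes
 *       void (*restorer)(void);              int sa_flags;
 *       kernel_sigset_t mask;  // 8 bytes    void (*sa_restorer)(void);
 *   };                                   };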
*/ static void intercept_signal(dcontext_t *dcontext, thread_sig_info_t *info, int sig) { kernel_sigaction_t act; ASSERT(sig <= MAX_SIGNUM); set_our_handler_sigact(&act, sig); set_handler_and_record_app(dcontext, info, sig, &act); } static void intercept_signal_ignore_initially(dcontext_t *dcontext, thread_sig_info_t *info, int sig) { kernel_sigaction_t act; ASSERT(sig <= MAX_SIGNUM); memset(&act, 0, sizeof(act)); act.handler = (handler_t)SIG_IGN; set_handler_and_record_app(dcontext, info, sig, &act); } static void intercept_signal_no_longer_ignore(dcontext_t *dcontext, thread_sig_info_t *info, int sig) { kernel_sigaction_t act; int rc; ASSERT(sig <= MAX_SIGNUM); set_our_handler_sigact(&act, sig); rc = sigaction_syscall(sig, &act, NULL); ASSERT(rc == 0); } /* i#1921: For proper single-threaded native execution with re-takeover we need * to propagate signals. For now we only support going completely native in * this thread but without a full detach, so we abandon our signal handlers w/o * freeing memory up front. * We also use this for the start/stop interface where we are going fully native * for all threads. */ void signal_remove_handlers(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; kernel_sigaction_t act; memset(&act, 0, sizeof(act)); act.handler = (handler_t)SIG_DFL; kernel_sigemptyset(&act.mask); for (i = 1; i <= MAX_SIGNUM; i++) { if (info->app_sigaction[i] != NULL) { LOG(THREAD, LOG_ASYNCH, 2, "\trestoring " PFX " as handler for %d\n", info->app_sigaction[i]->handler, i); sigaction_syscall(i, info->app_sigaction[i], NULL); } else if (info->we_intercept[i]) { /* restore to default */ LOG(THREAD, LOG_ASYNCH, 2, "\trestoring SIG_DFL as handler for %d\n", i); sigaction_syscall(i, &act, NULL); } } DODEBUG({ removed_sig_handler = true; }); } void signal_remove_alarm_handlers(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; for (i = 1; i <= MAX_SIGNUM; i++) { if (!info->we_intercept[i]) continue; if (sig_is_alarm_signal(i)) { set_ignore_signal_action(i); } } } /* For attaching mid-run, we assume regular POSIX with handlers global to just one * thread group in the process. * We also use this routine for the initial setup of our handlers, which we * split from signal_thread_inherit() to support start/stop. */ void signal_reinstate_handlers(dcontext_t *dcontext, bool ignore_alarm) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; for (i = 1; i <= MAX_SIGNUM; i++) { bool skip = false; if (!info->we_intercept[i]) { skip = true; if (signal_is_interceptable(i)) { /* We do have to intercept everything the app does. * If the app removes its handler, we'll never remove ours, which we * can live with. 
             */
            kernel_sigaction_t oldact;
            int rc = sigaction_syscall(i, NULL, &oldact);
            ASSERT(rc == 0);
            if (rc == 0 && oldact.handler != (handler_t)SIG_DFL &&
                oldact.handler != (handler_t)master_signal_handler) {
                skip = false;
            }
        }
        }
        if (skip)
            continue;
        if (sig_is_alarm_signal(i) && ignore_alarm) {
            LOG(THREAD, LOG_ASYNCH, 2, "\tignoring %d initially\n", i);
            intercept_signal_ignore_initially(dcontext, info, i);
        } else {
            LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
            intercept_signal(dcontext, info, i);
        }
    }
    DODEBUG({ removed_sig_handler = false; });
}

void
signal_reinstate_alarm_handlers(dcontext_t *dcontext)
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    int i;
    for (i = 1; i <= MAX_SIGNUM; i++) {
        if (!info->we_intercept[i] || !sig_is_alarm_signal(i))
            continue;
        LOG(THREAD, LOG_ASYNCH, 2, "\trestoring DR handler for %d\n", i);
        intercept_signal_no_longer_ignore(dcontext, info, i);
    }
}

/**** system call handlers ***********************************************/

/* FIXME: invalid pointer passed to kernel will currently show up
 * probably as a segfault in our handlers below...need to make them
 * look like kernel, and pass error code back to os.c
 */
void
handle_clone(dcontext_t *dcontext, uint flags)
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    if ((flags & CLONE_VM) == 0) {
        /* separate process not sharing memory */
        if ((flags & CLONE_SIGHAND) != 0) {
            /* FIXME: how deal with this?
             * "man clone" says: "Since Linux 2.6.0-test6, flags must also
             * include CLONE_VM if CLONE_SIGHAND is specified"
             */
            LOG(THREAD, LOG_ASYNCH, 1, "WARNING: !CLONE_VM but CLONE_SIGHAND!\n");
            ASSERT_NOT_IMPLEMENTED(false);
        }
        return;
    }
    pre_second_thread();
    if ((flags & CLONE_SIGHAND) != 0) {
        /* need to share table of handlers! */
        LOG(THREAD, LOG_ASYNCH, 2, "handle_clone: CLONE_SIGHAND set!\n");
        if (!info->shared_app_sigaction) {
            /* this is the start of a chain of sharing
             * no synch needed here, child not created yet
             */
            info->shared_app_sigaction = true;
            info->shared_refcount =
                (int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
            *info->shared_refcount = 1;
            info->shared_lock =
                (mutex_t *)global_heap_alloc(sizeof(mutex_t) HEAPACCT(ACCT_OTHER));
            ASSIGN_INIT_LOCK_FREE(*info->shared_lock, shared_lock);
        } /* else, some ancestor is already owner */
    } else {
        /* child will inherit copy of current table -> cannot modify it
         * until child is scheduled!  FIXME: any other way?
         */
        mutex_lock(&info->child_lock);
        info->num_unstarted_children++;
        mutex_unlock(&info->child_lock);
    }
    if (TEST(CLONE_THREAD, flags) && os_itimers_thread_shared()) {
        if (!info->shared_itimer) {
            /* this is the start of a chain of sharing
             * no synch needed here, child not created yet
             */
            info->shared_itimer = true;
            info->shared_itimer_refcount =
                (int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
            *info->shared_itimer_refcount = 1;
            info->shared_itimer_underDR =
                (int *)global_heap_alloc(sizeof(int) HEAPACCT(ACCT_OTHER));
            *info->shared_itimer_underDR = 1;
        } /* else, some ancestor already created */
    }
}

/* Returns false if should NOT issue syscall.
 * In such a case, the result is in "result".
 * If *result is non-zero, the syscall should fail.
 * We could instead issue the syscall and expect it to fail, which would have a more
 * accurate error code, but that risks missing a failure (e.g., RT on Android
 * which in some cases returns success on bogus params).
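 *
 * Caller-side contract, sketched (hypothetical caller, illustration only):
 *
 *   uint result;
 *   if (!handle_sigaction(dcontext, sig, act, oact, sigsetsize, &result)) {
 *       // skip the real syscall and report -result to the app
 *       // (result == 0 means emulated success)
 *   }
 *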
* It seems better to err on the side of the wrong error code or failing when * we shouldn't, than to think it failed when it didn't, which is more complex * to deal with. */ bool handle_sigaction(dcontext_t *dcontext, int sig, const kernel_sigaction_t *act, prev_sigaction_t *oact, size_t sigsetsize, OUT uint *result) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; kernel_sigaction_t *save; kernel_sigaction_t local_act; if (sigsetsize != sizeof(kernel_sigset_t)) { *result = EINVAL; return false; } if (act != NULL) { /* Linux checks readability before checking the signal number. */ if (!safe_read(act, sizeof(local_act), &local_act)) { *result = EFAULT; return false; } } /* i#1135: app may pass invalid signum to find MAX_SIGNUM */ if (sig <= 0 || sig > MAX_SIGNUM || (act != NULL && !signal_is_interceptable(sig))) { *result = EINVAL; return false; } if (act != NULL) { /* app is installing a new action */ while (info->num_unstarted_children > 0) { /* must wait for children to start and copy our state * before we modify it! */ os_thread_yield(); } info->sigaction_param = act; } if (info->shared_app_sigaction) { /* app_sigaction structure is shared */ mutex_lock(info->shared_lock); } if (oact != NULL) { /* Keep a copy of the prior one for post-syscall to hand to the app. */ info->use_kernel_prior_sigaction = false; if (info->app_sigaction[sig] == NULL) { if (info->we_intercept[sig]) { /* need to pretend there is no handler */ memset(&info->prior_app_sigaction, 0, sizeof(info->prior_app_sigaction)); info->prior_app_sigaction.handler = (handler_t)SIG_DFL; } else { info->use_kernel_prior_sigaction = true; } } else { memcpy(&info->prior_app_sigaction, info->app_sigaction[sig], sizeof(info->prior_app_sigaction)); } } if (act != NULL) { if (local_act.handler == (handler_t)SIG_IGN || local_act.handler == (handler_t)SIG_DFL) { LOG(THREAD, LOG_ASYNCH, 2, "app installed %s as sigaction for signal %d\n", (local_act.handler == (handler_t)SIG_IGN) ? "SIG_IGN" : "SIG_DFL", sig); if (!info->we_intercept[sig]) { /* let the SIG_IGN/SIG_DFL go through, we want to remove our * handler. 
we delete the stored app_sigaction in post_ */ if (info->shared_app_sigaction) mutex_unlock(info->shared_lock); return true; } } else { LOG(THREAD, LOG_ASYNCH, 2, "app installed " PFX " as sigaction for signal %d\n", local_act.handler, sig); DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "signal mask for handler:\n"); dump_sigset(dcontext, (kernel_sigset_t *)&local_act.mask); }); } /* save app's entire sigaction struct */ save = (kernel_sigaction_t *)handler_alloc(dcontext, sizeof(kernel_sigaction_t)); memcpy(save, &local_act, sizeof(kernel_sigaction_t)); /* Remove the unblockable sigs */ kernel_sigdelset(&save->mask, SIGKILL); kernel_sigdelset(&save->mask, SIGSTOP); if (info->app_sigaction[sig] != NULL) { /* go ahead and toss the old one, it's up to the app to store * and then restore later if it wants to */ handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); } info->app_sigaction[sig] = save; LOG(THREAD, LOG_ASYNCH, 3, "\tflags = " PFX ", %s = " PFX "\n", local_act.flags, IF_MACOS_ELSE("tramp", "restorer"), IF_MACOS_ELSE(local_act.tramp, local_act.restorer)); /* clear cache */ info->restorer_valid[sig] = -1; } if (info->shared_app_sigaction) mutex_unlock(info->shared_lock); if (info->we_intercept[sig]) { /* cancel the syscall */ *result = handle_post_sigaction(dcontext, true, sig, act, oact, sigsetsize); return false; } if (act != NULL) { /* Now hand kernel our master handler instead of app's. */ set_our_handler_sigact(&info->our_sigaction, sig); set_syscall_param(dcontext, 1, (reg_t)&info->our_sigaction); /* FIXME PR 297033: we don't support intercepting DEFAULT_STOP / * DEFAULT_CONTINUE signals b/c we can't generate the default * action: if the app registers a handler, though, we should work * properly if we never see SIG_DFL. */ } return true; } /* os.c thinks it's passing us struct_sigaction, really it's kernel_sigaction_t, * which has fields in different order. * Only called on success. * Returns the desired app return value (caller will negate if nec). */ uint handle_post_sigaction(dcontext_t *dcontext, bool success, int sig, const kernel_sigaction_t *act, prev_sigaction_t *oact, size_t sigsetsize) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; if (act != NULL) { /* Restore app register value, in case we changed it. */ set_syscall_param(dcontext, 1, (reg_t)info->sigaction_param); } if (!success) return 0; /* don't change return value */ ASSERT(sig <= MAX_SIGNUM && sig > 0); if (oact != NULL) { if (info->use_kernel_prior_sigaction) { /* Real syscall succeeded with oact so it must be readable, barring races. */ ASSERT(oact->handler == (handler_t)SIG_IGN || oact->handler == (handler_t)SIG_DFL); } else { /* We may have skipped the syscall so we have to check writability */ #ifdef MACOS /* On MacOS prev_sigaction_t is a different type (i#2105) */ bool fault = true; TRY_EXCEPT(dcontext, { oact->handler = info->prior_app_sigaction.handler; oact->mask = info->prior_app_sigaction.mask; oact->flags = info->prior_app_sigaction.flags; fault = false; }, { /* EXCEPT */ /* nothing: fault is already true */ }); if (fault) return EFAULT; #else if (!safe_write_ex(oact, sizeof(*oact), &info->prior_app_sigaction, NULL)) { /* We actually don't have to undo installing any passed action * b/c the Linux kernel does that *before* checking oact perms. */ return EFAULT; } #endif } } /* If installing IGN or DFL, delete ours. * XXX: This is racy. We can't hold the lock across the syscall, though. 
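     *
     * The race, concretely (hypothetical interleaving, illustration only):
     *
     *   thread A                            thread B
     *   sigaction(SIG, SIG_IGN) -> kernel
     *                                       sigaction(SIG, &handler) -> kernel
     *                                       app_sigaction[SIG] = copy of handler
     *   handler_free(app_sigaction[SIG]);   // B's stored handler is lost
     *   app_sigaction[SIG] = NULL;
     *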
* What we should do is just drop support for -no_intercept_all_signals, * which is off by default anyway and never turned off. */ if (act != NULL && /* De-ref here should work barring races: already racy and non-default so not * bothering with safe_read. */ ((act->handler == (handler_t)SIG_IGN || act->handler == (handler_t)SIG_DFL) && !info->we_intercept[sig]) && info->app_sigaction[sig] != NULL) { if (info->shared_app_sigaction) mutex_lock(info->shared_lock); /* remove old stored app action */ handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); info->app_sigaction[sig] = NULL; if (info->shared_app_sigaction) mutex_unlock(info->shared_lock); } return 0; } #ifdef LINUX static bool convert_old_sigaction_to_kernel(dcontext_t *dcontext, kernel_sigaction_t *ks, const old_sigaction_t *os) { bool res = false; TRY_EXCEPT(dcontext, { ks->handler = os->handler; ks->flags = os->flags; ks->restorer = os->restorer; kernel_sigemptyset(&ks->mask); ks->mask.sig[0] = os->mask; res = true; }, { /* EXCEPT */ /* nothing: res is already false */ }); return res; } static bool convert_kernel_sigaction_to_old(dcontext_t *dcontext, old_sigaction_t *os, const kernel_sigaction_t *ks) { bool res = false; TRY_EXCEPT(dcontext, { os->handler = ks->handler; os->flags = ks->flags; os->restorer = ks->restorer; os->mask = ks->mask.sig[0]; res = true; }, { /* EXCEPT */ /* nothing: res is already false */ }); return res; } /* Returns false (and "result") if should NOT issue syscall. */ bool handle_old_sigaction(dcontext_t *dcontext, int sig, const old_sigaction_t *act, old_sigaction_t *oact, OUT uint *result) { kernel_sigaction_t kact; kernel_sigaction_t okact; bool res; if (act != NULL) { if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) { *result = EFAULT; return false; } } res = handle_sigaction(dcontext, sig, act == NULL ? NULL : &kact, oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t), result); if (!res) *result = handle_post_old_sigaction(dcontext, true, sig, act, oact); return res; } /* Returns the desired app return value (caller will negate if nec). */ uint handle_post_old_sigaction(dcontext_t *dcontext, bool success, int sig, const old_sigaction_t *act, old_sigaction_t *oact) { kernel_sigaction_t kact; kernel_sigaction_t okact; ptr_uint_t res; if (act != NULL && success) { if (!convert_old_sigaction_to_kernel(dcontext, &kact, act)) { ASSERT(!success); return EFAULT; } } if (oact != NULL && success) { if (!convert_old_sigaction_to_kernel(dcontext, &okact, oact)) { ASSERT(!success); return EFAULT; } } res = handle_post_sigaction(dcontext, success, sig, act == NULL ? NULL : &kact, oact == NULL ? NULL : &okact, sizeof(kernel_sigset_t)); if (res == 0 && oact != NULL) { if (!convert_kernel_sigaction_to_old(dcontext, oact, &okact)) { return EFAULT; } } return res; } #endif /* LINUX */ /* Returns false and sets *result if should NOT issue syscall. * If *result is non-zero, the syscall should fail. */ bool handle_sigaltstack(dcontext_t *dcontext, const stack_t *stack, stack_t *old_stack, reg_t cur_xsp, OUT uint *result) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; stack_t local_stack; if (old_stack != NULL) { if (!safe_write_ex(old_stack, sizeof(*old_stack), &info->app_sigstack, NULL)) { *result = EFAULT; return false; } } if (stack != NULL) { /* Fail in the same way the kernel does. 
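         *
         * Our reading of the kernel's checks, roughly (pseudo-code,
         * illustration only):
         *
         *   if (!readable(ss))                               return -EFAULT;
         *   if (currently executing on the old altstack)     return -EPERM;
         *   if (ss_flags not in {0, SS_ONSTACK, SS_DISABLE}) return -EINVAL;
         *   if (!SS_DISABLE && ss_size < MINSIGSTKSZ)        return -ENOMEM;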
*/ if (!safe_read(stack, sizeof(local_stack), &local_stack)) { *result = EFAULT; return false; } if (APP_HAS_SIGSTACK(info)) { /* The app is not allowed to set a new altstack while on the current one. */ reg_t cur_sigstk = (reg_t)info->app_sigstack.ss_sp; if (cur_xsp >= cur_sigstk && cur_xsp < cur_sigstk + info->app_sigstack.ss_size) { *result = EPERM; return false; } } uint key_flag = local_stack.ss_flags & ~SS_FLAG_BITS; if (key_flag != SS_DISABLE && key_flag != SS_ONSTACK && key_flag != 0) { *result = EINVAL; return false; } if (key_flag == SS_DISABLE) { /* Zero the other params and don't even check them. */ local_stack.ss_sp = NULL; local_stack.ss_size = 0; } else { if (local_stack.ss_size < MINSIGSTKSZ) { *result = ENOMEM; return false; } } info->app_sigstack = local_stack; LOG(THREAD, LOG_ASYNCH, 2, "Setting app signal stack to " PFX "-" PFX " %d=%s\n", local_stack.ss_sp, local_stack.ss_sp + local_stack.ss_size - 1, local_stack.ss_flags, (APP_HAS_SIGSTACK(info)) ? "enabled" : "disabled"); } *result = 0; return false; /* always cancel syscall */ } /* Blocked signals: * In general, we don't need to keep track of blocked signals. * We only need to do so for those signals we intercept ourselves. * Thus, info->app_sigblocked ONLY contains entries for signals * we intercept ourselves. * PR 304708: we now intercept all signals. */ static void set_blocked(dcontext_t *dcontext, kernel_sigset_t *set, bool absolute) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; if (absolute) { /* discard current blocked signals, re-set from new mask */ kernel_sigemptyset(&info->app_sigblocked); } /* else, OR in the new set */ for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) { kernel_sigaddset(&info->app_sigblocked, i); } } #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n"); dump_sigset(dcontext, &info->app_sigblocked); } #endif } void signal_set_mask(dcontext_t *dcontext, kernel_sigset_t *sigset) { set_blocked(dcontext, sigset, true /*absolute*/); } void signal_swap_mask(dcontext_t *dcontext, bool to_app) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; if (to_app) { if (init_info.app_sigaction != NULL) { /* This is the first execution of the app. * We need to remove our own init-time handler and mask. */ unset_initial_crash_handlers(dcontext); return; } sigprocmask_syscall(SIG_SETMASK, &info->app_sigblocked, NULL, sizeof(info->app_sigblocked)); } else { unblock_all_signals(&info->app_sigblocked); DOLOG(2, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 2, "thread %d's initial app signal mask:\n", get_thread_id()); dump_sigset(dcontext, &info->app_sigblocked); }); } } /* Scans over info->sigpending to see if there are any unblocked, pending * signals, and sets dcontext->signals_pending if there are. Do this after * modifying the set of signals blocked by the application. */ void check_signals_pending(dcontext_t *dcontext, thread_sig_info_t *info) { int i; if (dcontext->signals_pending != 0) return; for (i = 1; i <= MAX_SIGNUM; i++) { if (info->sigpending[i] != NULL && !kernel_sigismember(&info->app_sigblocked, i) && !dcontext->signals_pending) { /* We only update the application's set of blocked signals from * syscall handlers, so we know we'll go back to dispatch and see * this flag right away. 
*/ LOG(THREAD, LOG_ASYNCH, 3, "\tsetting signals_pending flag\n"); dcontext->signals_pending = 1; break; } } } /* Returns whether to execute the syscall */ bool handle_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set, kernel_sigset_t *oset, size_t sigsetsize) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; kernel_sigset_t safe_set; /* If we're intercepting all, we emulate the whole thing */ bool execute_syscall = !DYNAMO_OPTION(intercept_all_signals); LOG(THREAD, LOG_ASYNCH, 2, "handle_sigprocmask\n"); if (oset != NULL) info->pre_syscall_app_sigblocked = info->app_sigblocked; if (app_set != NULL && safe_read(app_set, sizeof(safe_set), &safe_set)) { if (execute_syscall) { /* The syscall will execute, so remove from the set passed * to it. We restore post-syscall. * XXX i#1187: we could crash here touching app memory -- could * use TRY, but the app could pass read-only memory and it * would work natively! Better to swap in our own * allocated data struct. There's a transparency issue w/ * races too if another thread looks at this memory. This * won't happen by default b/c -intercept_all_signals is * on by default so we don't try to solve all these * issues. */ info->pre_syscall_app_sigprocmask = safe_set; } if (how == SIG_BLOCK) { /* The set of blocked signals is the union of the current * set and the set argument. */ for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) { kernel_sigaddset(&info->app_sigblocked, i); if (execute_syscall) kernel_sigdelset(app_set, i); } } } else if (how == SIG_UNBLOCK) { /* The signals in set are removed from the current set of * blocked signals. */ for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) { kernel_sigdelset(&info->app_sigblocked, i); if (execute_syscall) kernel_sigdelset(app_set, i); } } } else if (how == SIG_SETMASK) { /* The set of blocked signals is set to the argument set. */ kernel_sigemptyset(&info->app_sigblocked); for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && kernel_sigismember(&safe_set, i)) { kernel_sigaddset(&info->app_sigblocked, i); if (execute_syscall) kernel_sigdelset(app_set, i); } } } #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "blocked signals are now:\n"); dump_sigset(dcontext, &info->app_sigblocked); } #endif /* make sure we deliver pending signals that are now unblocked * FIXME: consider signal #S, which we intercept ourselves. * If S arrives, then app blocks it prior to our delivering it, * we then won't deliver it until app unblocks it...is this a * problem? Could have arrived a little later and then we would * do same thing, but this way kernel may send one more than would * get w/o dynamo? This goes away if we deliver signals * prior to letting app do a syscall. 
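         *
         * App-visible sequence we emulate, e.g. (illustrative libc-level
         * snippet):
         *
         *   sigset_t s;
         *   sigemptyset(&s);
         *   sigaddset(&s, SIGUSR1);
         *   sigprocmask(SIG_BLOCK, &s, NULL);   // updates app_sigblocked
         *   ...                                 // SIGUSR1 arrives: we queue it
         *   sigprocmask(SIG_UNBLOCK, &s, NULL); // check_signals_pending()
         *                                       // below flags it for delivery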
*/ check_signals_pending(dcontext, info); } if (!execute_syscall) { handle_post_sigprocmask(dcontext, how, app_set, oset, sigsetsize); return false; /* skip syscall */ } else return true; } /* need to add in our signals that the app thinks are blocked */ void handle_post_sigprocmask(dcontext_t *dcontext, int how, kernel_sigset_t *app_set, kernel_sigset_t *oset, size_t sigsetsize) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; if (!DYNAMO_OPTION(intercept_all_signals)) { /* Restore app memory */ safe_write_ex(app_set, sizeof(*app_set), &info->pre_syscall_app_sigprocmask, NULL); } if (oset != NULL) { if (DYNAMO_OPTION(intercept_all_signals)) safe_write_ex(oset, sizeof(*oset), &info->pre_syscall_app_sigblocked, NULL); else { /* the syscall wrote to oset already, so just add any additional */ for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && /* use the pre-syscall value: do not take into account changes * from this syscall itself! (PR 523394) */ kernel_sigismember(&info->pre_syscall_app_sigblocked, i)) { kernel_sigaddset(oset, i); } } } } } void handle_sigsuspend(dcontext_t *dcontext, kernel_sigset_t *set, size_t sigsetsize) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; ASSERT(set != NULL); LOG(THREAD, LOG_ASYNCH, 2, "handle_sigsuspend\n"); info->in_sigsuspend = true; info->app_sigblocked_save = info->app_sigblocked; kernel_sigemptyset(&info->app_sigblocked); for (i = 1; i <= MAX_SIGNUM; i++) { if (EMULATE_SIGMASK(info, i) && kernel_sigismember(set, i)) { kernel_sigaddset(&info->app_sigblocked, i); kernel_sigdelset(set, i); } } #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "in sigsuspend, blocked signals are now:\n"); dump_sigset(dcontext, &info->app_sigblocked); } #endif } /**** utility routines ***********************************************/ #ifdef DEBUG static void dump_sigset(dcontext_t *dcontext, kernel_sigset_t *set) { int sig; for (sig = 1; sig <= MAX_SIGNUM; sig++) { if (kernel_sigismember(set, sig)) LOG(THREAD, LOG_ASYNCH, 1, "\t%d = blocked\n", sig); } } #endif /* DEBUG */ /* PR 205795: to avoid lock problems w/ in_fcache (it grabs a lock, we * could have interrupted someone holding that), we first check * whereami --- if whereami is DR_WHERE_FCACHE we still check the pc * to distinguish generated routines, but at least we're certain * it's not in DR where it could own a lock. * We can't use is_on_dstack() here b/c we need to handle clean call * arg crashes -- which is too bad since checking client dll and DR dll is * not sufficient due to calls to ntdll, libc, or pc being in gencode. 
*/ static bool safe_is_in_fcache(dcontext_t *dcontext, app_pc pc, app_pc xsp) { if (dcontext->whereami != DR_WHERE_FCACHE || IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||) is_in_dynamo_dll(pc) || is_on_initstack(xsp)) return false; /* Reasonably certain not in DR code, so no locks should be held */ return in_fcache(pc); } static bool safe_is_in_coarse_stubs(dcontext_t *dcontext, app_pc pc, app_pc xsp) { if (dcontext->whereami != DR_WHERE_FCACHE || IF_CLIENT_INTERFACE(is_in_client_lib(pc) ||) is_in_dynamo_dll(pc) || is_on_initstack(xsp)) return false; /* Reasonably certain not in DR code, so no locks should be held */ return in_coarse_stubs(pc); } static bool is_on_alt_stack(dcontext_t *dcontext, byte *sp) { #ifdef HAVE_SIGALTSTACK thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; return (sp >= (byte *)info->sigstack.ss_sp && /* deliberate equality check since stacks often init to top */ sp <= (byte *)(info->sigstack.ss_sp + info->sigstack.ss_size)); #else return false; #endif } /* The caller must initialize ucxt, including its fpstate pointer for x86 Linux. */ static void sig_full_initialize(sig_full_cxt_t *sc_full, kernel_ucontext_t *ucxt) { sc_full->sc = SIGCXT_FROM_UCXT(ucxt); #ifdef X86 sc_full->fp_simd_state = NULL; /* we have a ptr inside sigcontext_t */ #elif defined(ARM) sc_full->fp_simd_state = &ucxt->coproc.uc_vfp; #elif defined(AARCH64) sc_full->fp_simd_state = &ucxt->uc_mcontext.__reserved; #else ASSERT_NOT_IMPLEMENTED(false); #endif } void sigcontext_to_mcontext(priv_mcontext_t *mc, sig_full_cxt_t *sc_full, dr_mcontext_flags_t flags) { sigcontext_t *sc = sc_full->sc; ASSERT(mc != NULL && sc != NULL); #ifdef X86 if (TEST(DR_MC_INTEGER, flags)) { mc->xax = sc->SC_XAX; mc->xbx = sc->SC_XBX; mc->xcx = sc->SC_XCX; mc->xdx = sc->SC_XDX; mc->xsi = sc->SC_XSI; mc->xdi = sc->SC_XDI; mc->xbp = sc->SC_XBP; # ifdef X64 mc->r8 = sc->SC_FIELD(r8); mc->r9 = sc->SC_FIELD(r9); mc->r10 = sc->SC_FIELD(r10); mc->r11 = sc->SC_FIELD(r11); mc->r12 = sc->SC_FIELD(r12); mc->r13 = sc->SC_FIELD(r13); mc->r14 = sc->SC_FIELD(r14); mc->r15 = sc->SC_FIELD(r15); # endif /* X64 */ } if (TEST(DR_MC_CONTROL, flags)) { mc->xsp = sc->SC_XSP; mc->xflags = sc->SC_XFLAGS; mc->pc = (app_pc)sc->SC_XIP; } #elif defined(AARCH64) if (TEST(DR_MC_INTEGER, flags)) memcpy(&mc->r0, &sc->SC_FIELD(regs[0]), sizeof(mc->r0) * 31); if (TEST(DR_MC_CONTROL, flags)) { /* XXX i#2710: the link register should be under DR_MC_CONTROL */ mc->sp = sc->SC_FIELD(sp); mc->pc = (void *)sc->SC_FIELD(pc); mc->nzcv = sc->SC_FIELD(pstate); } #elif defined(ARM) if (TEST(DR_MC_INTEGER, flags)) { mc->r0 = sc->SC_FIELD(arm_r0); mc->r1 = sc->SC_FIELD(arm_r1); mc->r2 = sc->SC_FIELD(arm_r2); mc->r3 = sc->SC_FIELD(arm_r3); mc->r4 = sc->SC_FIELD(arm_r4); mc->r5 = sc->SC_FIELD(arm_r5); mc->r6 = sc->SC_FIELD(arm_r6); mc->r7 = sc->SC_FIELD(arm_r7); mc->r8 = sc->SC_FIELD(arm_r8); mc->r9 = sc->SC_FIELD(arm_r9); mc->r10 = sc->SC_FIELD(arm_r10); mc->r11 = sc->SC_FIELD(arm_fp); mc->r12 = sc->SC_FIELD(arm_ip); /* XXX i#2710: the link register should be under DR_MC_CONTROL */ mc->r14 = sc->SC_FIELD(arm_lr); } if (TEST(DR_MC_CONTROL, flags)) { mc->r13 = sc->SC_FIELD(arm_sp); mc->r15 = sc->SC_FIELD(arm_pc); mc->cpsr = sc->SC_FIELD(arm_cpsr); } # ifdef X64 # error NYI on AArch64 # endif /* X64 */ #endif /* X86/ARM */ if (TEST(DR_MC_MULTIMEDIA, flags)) sigcontext_to_mcontext_simd(mc, sc_full); } /* Note that unlike mcontext_to_context(), this routine does not fill in * any state that is not present in the mcontext: in particular, it assumes * the 
sigcontext already contains the native fpstate. If the caller * is generating a synthetic sigcontext, the caller should call * save_fpstate() before calling this routine. */ /* XXX: on ARM, sigreturn needs the T bit set in the sigcontext_t cpsr field in * order to return to Thumb mode. But, our mcontext doesn't have the T bit (b/c * usermode can't read it). Thus callers must either modify an mcontext * obtained from sigcontext_to_mcontext() or must call set_pc_mode_in_cpsr() in * order to create a proper sigcontext for sigreturn. All callers here do so. * The only external non-Windows caller of thread_set_mcontext() is * translate_from_synchall_to_dispatch() who first does a thread_get_mcontext() * and tweaks that context, so cpsr should be there. */ void mcontext_to_sigcontext(sig_full_cxt_t *sc_full, priv_mcontext_t *mc, dr_mcontext_flags_t flags) { sigcontext_t *sc = sc_full->sc; ASSERT(mc != NULL && sc != NULL); #ifdef X86 if (TEST(DR_MC_INTEGER, flags)) { sc->SC_XAX = mc->xax; sc->SC_XBX = mc->xbx; sc->SC_XCX = mc->xcx; sc->SC_XDX = mc->xdx; sc->SC_XSI = mc->xsi; sc->SC_XDI = mc->xdi; sc->SC_XBP = mc->xbp; # ifdef X64 sc->SC_FIELD(r8) = mc->r8; sc->SC_FIELD(r9) = mc->r9; sc->SC_FIELD(r10) = mc->r10; sc->SC_FIELD(r11) = mc->r11; sc->SC_FIELD(r12) = mc->r12; sc->SC_FIELD(r13) = mc->r13; sc->SC_FIELD(r14) = mc->r14; sc->SC_FIELD(r15) = mc->r15; # endif /* X64 */ } if (TEST(DR_MC_CONTROL, flags)) { sc->SC_XSP = mc->xsp; sc->SC_XFLAGS = mc->xflags; sc->SC_XIP = (ptr_uint_t)mc->pc; } #elif defined(AARCH64) if (TEST(DR_MC_INTEGER, flags)) { memcpy(&sc->SC_FIELD(regs[0]), &mc->r0, sizeof(mc->r0) * 31); } if (TEST(DR_MC_CONTROL, flags)) { /* XXX i#2710: the link register should be under DR_MC_CONTROL */ sc->SC_FIELD(sp) = mc->sp; sc->SC_FIELD(pc) = (ptr_uint_t)mc->pc; sc->SC_FIELD(pstate) = mc->nzcv; } #elif defined(ARM) if (TEST(DR_MC_INTEGER, flags)) { sc->SC_FIELD(arm_r0) = mc->r0; sc->SC_FIELD(arm_r1) = mc->r1; sc->SC_FIELD(arm_r2) = mc->r2; sc->SC_FIELD(arm_r3) = mc->r3; sc->SC_FIELD(arm_r4) = mc->r4; sc->SC_FIELD(arm_r5) = mc->r5; sc->SC_FIELD(arm_r6) = mc->r6; sc->SC_FIELD(arm_r7) = mc->r7; sc->SC_FIELD(arm_r8) = mc->r8; sc->SC_FIELD(arm_r9) = mc->r9; sc->SC_FIELD(arm_r10) = mc->r10; sc->SC_FIELD(arm_fp) = mc->r11; sc->SC_FIELD(arm_ip) = mc->r12; /* XXX i#2710: the link register should be under DR_MC_CONTROL */ sc->SC_FIELD(arm_lr) = mc->r14; } if (TEST(DR_MC_CONTROL, flags)) { sc->SC_FIELD(arm_sp) = mc->r13; sc->SC_FIELD(arm_pc) = mc->r15; sc->SC_FIELD(arm_cpsr) = mc->cpsr; } # ifdef X64 # error NYI on AArch64 # endif /* X64 */ #endif /* X86/ARM */ if (TEST(DR_MC_MULTIMEDIA, flags)) mcontext_to_sigcontext_simd(sc_full, mc); } static void ucontext_to_mcontext(priv_mcontext_t *mc, kernel_ucontext_t *uc) { sig_full_cxt_t sc_full; sig_full_initialize(&sc_full, uc); sigcontext_to_mcontext(mc, &sc_full, DR_MC_ALL); } static void mcontext_to_ucontext(kernel_ucontext_t *uc, priv_mcontext_t *mc) { sig_full_cxt_t sc_full; sig_full_initialize(&sc_full, uc); mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL); } #ifdef AARCHXX static void set_sigcxt_stolen_reg(sigcontext_t *sc, reg_t val) { *(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0)) = val; } static reg_t get_sigcxt_stolen_reg(sigcontext_t *sc) { return *(&sc->SC_R0 + (dr_reg_stolen - DR_REG_R0)); } # ifndef AARCH64 static dr_isa_mode_t get_pc_mode_from_cpsr(sigcontext_t *sc) { return TEST(EFLAGS_T, sc->SC_XFLAGS) ? 
DR_ISA_ARM_THUMB : DR_ISA_ARM_A32; } static void set_pc_mode_in_cpsr(sigcontext_t *sc, dr_isa_mode_t isa_mode) { if (isa_mode == DR_ISA_ARM_THUMB) sc->SC_XFLAGS |= EFLAGS_T; else sc->SC_XFLAGS &= ~EFLAGS_T; } # endif #endif /* Returns whether successful. If avoid_failure, tries to translate * at least pc if not successful. Pass f if known. */ static bool translate_sigcontext(dcontext_t *dcontext, kernel_ucontext_t *uc, bool avoid_failure, fragment_t *f) { bool success = false; priv_mcontext_t mcontext; sigcontext_t *sc = SIGCXT_FROM_UCXT(uc); ucontext_to_mcontext(&mcontext, uc); /* FIXME: if cannot find exact match, we're in trouble! * probably ok to delay, since that indicates not a synchronous * signal. */ /* FIXME : in_fcache() (called by recreate_app_state) grabs fcache * fcache_unit_areas.lock, we could deadlock! Also on initexit_lock * == PR 205795/1317 */ /* For safe recreation we need to either be couldbelinking or hold the * initexit lock (to keep someone from flushing current fragment), the * initexit lock is easier */ mutex_lock(&thread_initexit_lock); /* PR 214962: we assume we're going to relocate to this stored context, * so we restore memory now */ if (translate_mcontext(dcontext->thread_record, &mcontext, true /*restore memory*/, f)) { mcontext_to_ucontext(uc, &mcontext); success = true; } else { if (avoid_failure) { ASSERT_NOT_REACHED(); /* is ok to break things, is UNIX :) */ /* FIXME : what to do? reg state might be wrong at least get pc */ if (safe_is_in_fcache(dcontext, (cache_pc)sc->SC_XIP, (app_pc)sc->SC_XSP)) { sc->SC_XIP = (ptr_uint_t)recreate_app_pc(dcontext, mcontext.pc, f); ASSERT(sc->SC_XIP != (ptr_uint_t)NULL); } else { /* FIXME : can't even get pc right, what do we do here? */ sc->SC_XIP = 0; } } } mutex_unlock(&thread_initexit_lock); /* FIXME i#2095: restore the app's segment register value(s). */ LOG(THREAD, LOG_ASYNCH, 3, "\ttranslate_sigcontext: just set frame's eip to " PFX "\n", sc->SC_XIP); return success; } /* Takes an os-specific context */ void thread_set_self_context(void *cxt) { #ifdef X86 if (!INTERNAL_OPTION(use_sigreturn_setcontext)) { sigcontext_t *sc = (sigcontext_t *)cxt; dr_jmp_buf_t buf; buf.xbx = sc->SC_XBX; buf.xcx = sc->SC_XCX; buf.xdi = sc->SC_XDI; buf.xsi = sc->SC_XSI; buf.xbp = sc->SC_XBP; /* XXX: this is not fully transparent: it assumes the target stack * is valid and that we can clobber the slot beyond TOS. * Using this instead of sigreturn is meant mainly as a diagnostic * to help debug future issues with sigreturn (xref i#2080). */ buf.xsp = sc->SC_XSP - XSP_SZ; /* extra slot for retaddr */ buf.xip = sc->SC_XIP; # ifdef X64 buf.r8 = sc->r8; buf.r9 = sc->r9; buf.r10 = sc->r10; buf.r11 = sc->r11; buf.r12 = sc->r12; buf.r13 = sc->r13; buf.r14 = sc->r14; buf.r15 = sc->r15; # endif dr_longjmp(&buf, sc->SC_XAX); return; } #endif dcontext_t *dcontext = get_thread_private_dcontext(); /* Unlike Windows we can't say "only set this subset of the * full machine state", so we need to get the rest of the state, */ sigframe_rt_t frame; #if defined(LINUX) || defined(DEBUG) sigcontext_t *sc = (sigcontext_t *)cxt; #endif app_pc xsp_for_sigreturn; #ifdef VMX86_SERVER ASSERT_NOT_IMPLEMENTED(false); /* PR 405694: can't use regular sigreturn! 
*/ #endif memset(&frame, 0, sizeof(frame)); #ifdef LINUX # ifdef X86 byte *xstate = get_xstate_buffer(dcontext); frame.uc.uc_mcontext.fpstate = &((kernel_xstate_t *)xstate)->fpstate; # endif /* X86 */ frame.uc.uc_mcontext = *sc; #endif save_fpstate(dcontext, &frame); /* The kernel calls do_sigaltstack on sys_rt_sigreturn primarily to ensure * the frame is ok, but the side effect is we can mess up our own altstack * settings if we're not careful. Having invalid ss_size looks good for * kernel 2.6.23.9 at least so we leave frame.uc.uc_stack as all zeros. */ /* make sure sigreturn's mask setting doesn't change anything */ sigprocmask_syscall(SIG_SETMASK, NULL, (kernel_sigset_t *)&frame.uc.uc_sigmask, sizeof(frame.uc.uc_sigmask)); LOG(THREAD_GET, LOG_ASYNCH, 2, "thread_set_self_context: pc=" PFX "\n", sc->SC_XIP); LOG(THREAD_GET, LOG_ASYNCH, 3, "full sigcontext\n"); DOLOG(LOG_ASYNCH, 3, { dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(&frame)); }); /* set up xsp to point at &frame + sizeof(char*) */ xsp_for_sigreturn = ((app_pc)&frame) + sizeof(char *); #ifdef X86 asm("mov %0, %%" ASM_XSP : : "m"(xsp_for_sigreturn)); # ifdef MACOS ASSERT_NOT_IMPLEMENTED(false && "need to pass 2 params to SYS_sigreturn"); asm("jmp _dynamorio_sigreturn"); # else /* i#2632: recent clang for 32-bit annoyingly won't do the right thing for * "jmp dynamorio_sigreturn" and leaves relocs so we ensure it's PIC: */ void (*asm_jmp_tgt)() = dynamorio_sigreturn; asm("mov %0, %%" ASM_XCX : : "m"(asm_jmp_tgt)); asm("jmp *%" ASM_XCX); # endif /* MACOS/LINUX */ #elif defined(AARCH64) ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ #elif defined(ARM) asm("ldr " ASM_XSP ", %0" : : "m"(xsp_for_sigreturn)); asm("b dynamorio_sigreturn"); #endif /* X86/ARM */ ASSERT_NOT_REACHED(); } static void thread_set_segment_registers(sigcontext_t *sc) { #ifdef X86 /* Fill in the segment registers */ __asm__ __volatile__("mov %%cs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(cs)) : : "eax"); # ifndef X64 __asm__ __volatile__("mov %%ss, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(ss)) : : "eax"); __asm__ __volatile__("mov %%ds, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(ds)) : : "eax"); __asm__ __volatile__("mov %%es, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(es)) : : "eax"); # endif __asm__ __volatile__("mov %%fs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(fs)) : : "eax"); __asm__ __volatile__("mov %%gs, %%ax; mov %%ax, %0" : "=m"(sc->SC_FIELD(gs)) : : "eax"); #endif } /* Takes a priv_mcontext_t */ void thread_set_self_mcontext(priv_mcontext_t *mc) { kernel_ucontext_t ucxt; sig_full_cxt_t sc_full; sig_full_initialize(&sc_full, &ucxt); #if defined(LINUX) && defined(X86) sc_full.sc->fpstate = NULL; /* for mcontext_to_sigcontext */ #endif mcontext_to_sigcontext(&sc_full, mc, DR_MC_ALL); thread_set_segment_registers(sc_full.sc); /* sigreturn takes the mode from cpsr */ IF_ARM( set_pc_mode_in_cpsr(sc_full.sc, dr_get_isa_mode(get_thread_private_dcontext()))); /* thread_set_self_context will fill in the real fp/simd state for x86 */ thread_set_self_context((void *)sc_full.sc); ASSERT_NOT_REACHED(); } #ifdef LINUX static bool sig_has_restorer(thread_sig_info_t *info, int sig) { # ifdef VMX86_SERVER /* vmkernel ignores SA_RESTORER (PR 405694) */ return false; # endif if (info->app_sigaction[sig] == NULL) return false; if (TEST(SA_RESTORER, info->app_sigaction[sig]->flags)) return true; if (info->app_sigaction[sig]->restorer == NULL) return false; /* we cache the result due to the safe_read cost */ if (info->restorer_valid[sig] == -1) { /* With older kernels, 
don't seem to need flag: if sa_restorer != * NULL kernel will use it. But with newer kernels that's not * true, and sometimes libc does pass non-NULL. */ # ifdef X86 /* Signal restorer code for Ubuntu 7.04: * 0xffffe420 <__kernel_sigreturn+0>: pop %eax * 0xffffe421 <__kernel_sigreturn+1>: mov $0x77,%eax * 0xffffe426 <__kernel_sigreturn+6>: int $0x80 * * 0xffffe440 <__kernel_rt_sigreturn+0>: mov $0xad,%eax * 0xffffe445 <__kernel_rt_sigreturn+5>: int $0x80 */ static const byte SIGRET_NONRT[8] = { 0x58, 0xb8, 0x77, 0x00, 0x00, 0x00, 0xcd, 0x80 }; static const byte SIGRET_RT[8] = { 0xb8, 0xad, 0x00, 0x00, 0x00, 0xcd, 0x80 }; # elif defined(ARM) static const byte SIGRET_NONRT[8] = { 0x77, 0x70, 0xa0, 0xe3, 0x00, 0x00, 0x00, 0xef }; static const byte SIGRET_RT[8] = { 0xad, 0x70, 0xa0, 0xe3, 0x00, 0x00, 0x00, 0xef }; # elif defined(AARCH64) static const byte SIGRET_NONRT[8] = { 0 }; /* unused */ static const byte SIGRET_RT[8] = /* FIXME i#1569: untested */ /* mov w8, #139 ; svc #0 */ { 0x68, 0x11, 0x80, 0x52, 0x01, 0x00, 0x00, 0xd4 }; # endif byte buf[MAX(sizeof(SIGRET_NONRT), sizeof(SIGRET_RT))] = { 0 }; if (safe_read(info->app_sigaction[sig]->restorer, sizeof(buf), buf) && ((IS_RT_FOR_APP(info, sig) && memcmp(buf, SIGRET_RT, sizeof(SIGRET_RT)) == 0) || (!IS_RT_FOR_APP(info, sig) && memcmp(buf, SIGRET_NONRT, sizeof(SIGRET_NONRT)) == 0))) { LOG(THREAD_GET, LOG_ASYNCH, 2, "sig_has_restorer %d: " PFX " looks like restorer, using w/o flag\n", sig, info->app_sigaction[sig]->restorer); info->restorer_valid[sig] = 1; } else info->restorer_valid[sig] = 0; } return (info->restorer_valid[sig] == 1); } #endif /* Returns the size of the frame for delivering to the app. * For x64 this does NOT include kernel_fpstate_t. */ static uint get_app_frame_size(thread_sig_info_t *info, int sig) { if (IS_RT_FOR_APP(info, sig)) return sizeof(sigframe_rt_t); #ifdef LINUX else return sizeof(sigframe_plain_t); #endif } static kernel_ucontext_t * get_ucontext_from_rt_frame(sigframe_rt_t *frame) { #if defined(MACOS) && !defined(X64) /* Padding makes it unsafe to access uc on frame from kernel */ return frame->puc; #else return &frame->uc; #endif } sigcontext_t * get_sigcontext_from_rt_frame(sigframe_rt_t *frame) { return SIGCXT_FROM_UCXT(get_ucontext_from_rt_frame(frame)); } static sigcontext_t * get_sigcontext_from_app_frame(thread_sig_info_t *info, int sig, void *frame) { sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */ bool rtframe = IS_RT_FOR_APP(info, sig); if (rtframe) sc = get_sigcontext_from_rt_frame((sigframe_rt_t *)frame); #ifdef LINUX else { # ifdef X86 sc = (sigcontext_t *)&(((sigframe_plain_t *)frame)->sc); # elif defined(ARM) sc = SIGCXT_FROM_UCXT(&(((sigframe_plain_t *)frame)->uc)); # else ASSERT_NOT_REACHED(); # endif } #endif return sc; } static sigcontext_t * get_sigcontext_from_pending(thread_sig_info_t *info, int sig) { ASSERT(info->sigpending[sig] != NULL); return get_sigcontext_from_rt_frame(&info->sigpending[sig]->rt_frame); } /* Returns the address on the appropriate signal stack where we should copy * the frame. * If frame is NULL, assumes signal happened while in DR and has been delayed, * and thus we need to provide fpstate regardless of whether the original * had it. If frame is non-NULL, matches frame's amount of fpstate. */ static byte * get_sigstack_frame_ptr(dcontext_t *dcontext, int sig, sigframe_rt_t *frame) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; sigcontext_t *sc = (frame == NULL) ? 
get_sigcontext_from_pending(info, sig) : get_sigcontext_from_rt_frame(frame); byte *sp; if (frame != NULL) { /* signal happened while in cache, grab interrupted xsp */ sp = (byte *)sc->SC_XSP; LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: using frame's xsp " PFX "\n", sp); } else { /* signal happened while in DR, use stored xsp */ sp = (byte *)get_mcontext(dcontext)->xsp; LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: using app xsp " PFX "\n", sp); } if (USE_APP_SIGSTACK(info, sig)) { /* app has own signal stack which is enabled for this handler */ LOG(THREAD, LOG_ASYNCH, 3, "get_sigstack_frame_ptr: app has own stack " PFX "\n", info->app_sigstack.ss_sp); LOG(THREAD, LOG_ASYNCH, 3, "\tcur sp=" PFX " vs app stack " PFX "-" PFX "\n", sp, info->app_sigstack.ss_sp, info->app_sigstack.ss_sp + info->app_sigstack.ss_size); if (sp > (byte *)info->app_sigstack.ss_sp && sp - (byte *)info->app_sigstack.ss_sp < info->app_sigstack.ss_size) { /* we're currently in the alt stack, so use current xsp */ LOG(THREAD, LOG_ASYNCH, 3, "\tinside alt stack, so using current xsp " PFX "\n", sp); } else { /* need to go to top, stack grows down */ sp = info->app_sigstack.ss_sp + info->app_sigstack.ss_size; LOG(THREAD, LOG_ASYNCH, 3, "\tnot inside alt stack, so using base xsp " PFX "\n", sp); } } /* now get frame pointer: need to go down to first field of frame */ sp -= get_app_frame_size(info, sig); #if defined(LINUX) && defined(X86) if (frame == NULL) { /* XXX i#641: we always include space for full xstate, * even if we don't use it all, which does not match what the * kernel does, but we're not tracking app actions to know whether * we can skip lazy fpstate on the delay */ sp -= signal_frame_extra_size(true); } else { if (sc->fpstate != NULL) { /* The kernel doesn't seem to lazily include avx, so we don't either, * which simplifies all our frame copying: if YMM_ENABLED() and the * fpstate pointer is non-NULL, then we assume there's space for * full xstate */ sp -= signal_frame_extra_size(true); DOCHECK(1, { if (YMM_ENABLED()) { ASSERT_CURIOSITY(sc->fpstate->sw_reserved.magic1 == FP_XSTATE_MAGIC1); ASSERT(sc->fpstate->sw_reserved.extended_size <= signal_frame_extra_size(true)); } }); } } #endif /* LINUX && X86 */ /* PR 369907: don't forget the redzone */ sp -= REDZONE_SIZE; /* Align to 16-bytes. The kernel does this for both 32 and 64-bit code * these days, so we do as well. */ sp = (byte *)ALIGN_BACKWARD(sp, 16); IF_X86(sp -= sizeof(reg_t)); /* Model retaddr. 
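     *
     * Worked example (x86-64, hypothetical numbers): with an interrupted
     * xsp of 0x7fff00001000, an rt frame of 0x4c0 bytes, and 0x340 bytes of
     * extra xstate:
     *
     *   sp = 0x7fff00001000 - 0x4c0 - 0x340; // frame + extra fpstate
     *   sp -= 128;                           // REDZONE_SIZE on x86-64 Linux
     *   sp = ALIGN_BACKWARD(sp, 16);         // kernel-style 16-byte alignment
     *   sp -= sizeof(reg_t);                 // the modeled return-address slot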
*/ LOG(THREAD, LOG_ASYNCH, 3, "\tplacing frame at " PFX "\n", sp); return sp; } #if defined(LINUX) && !defined(X64) static void convert_rt_mask_to_nonrt(sigframe_plain_t *f_plain, kernel_sigset_t *sigmask) { # ifdef X86 f_plain->sc.oldmask = sigmask->sig[0]; memcpy(&f_plain->extramask, &sigmask->sig[1], (_NSIG_WORDS - 1) * sizeof(uint)); # elif defined(ARM) f_plain->uc.uc_mcontext.oldmask = sigmask->sig[0]; memcpy(&f_plain->uc.sigset_ex, &sigmask->sig[1], (_NSIG_WORDS - 1) * sizeof(uint)); # else # error NYI # endif } static void convert_frame_to_nonrt(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old, sigframe_plain_t *f_new) { # ifdef X86 sigcontext_t *sc_old = get_sigcontext_from_rt_frame(f_old); f_new->pretcode = f_old->pretcode; f_new->sig = f_old->sig; memcpy(&f_new->sc, get_sigcontext_from_rt_frame(f_old), sizeof(sigcontext_t)); if (sc_old->fpstate != NULL) { /* up to caller to include enough space for fpstate at end */ byte *new_fpstate = (byte *)ALIGN_FORWARD(((byte *)f_new) + sizeof(*f_new), XSTATE_ALIGNMENT); memcpy(new_fpstate, sc_old->fpstate, signal_frame_extra_size(false)); f_new->sc.fpstate = (kernel_fpstate_t *)new_fpstate; } convert_rt_mask_to_nonrt(f_new, &f_old->uc.uc_sigmask); memcpy(&f_new->retcode, &f_old->retcode, RETCODE_SIZE); /* now fill in our extra field */ f_new->sig_noclobber = f_new->sig; # elif defined(ARM) memcpy(&f_new->uc, &f_old->uc, sizeof(f_new->uc)); memcpy(f_new->retcode, f_old->retcode, sizeof(f_new->retcode)); /* now fill in our extra field */ f_new->sig_noclobber = f_old->info.si_signo; # endif /* X86 */ LOG(THREAD, LOG_ASYNCH, 3, "\tconverted sig=%d rt frame to non-rt frame\n", f_new->sig_noclobber); } #endif /* Exported for call from master_signal_handler asm routine. * For the rt signal frame f_old that was copied to f_new, updates * the intra-frame absolute pointers to point to the new addresses * in f_new. * Only updates the pretcode to the stored app restorer if for_app. 
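 *
 * Why the fixups are needed, sketched: the frame holds absolute pointers
 * into itself, so after a plain copy from f_old they still point into the
 * old copy (32-bit layout, illustration only):
 *
 *   f_new->pinfo = &f_new->info; // was left pointing at &f_old->info
 *   f_new->puc   = &f_new->uc;   // was left pointing at &f_old->uc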
*/ void fixup_rtframe_pointers(dcontext_t *dcontext, int sig, sigframe_rt_t *f_old, sigframe_rt_t *f_new, bool for_app) { if (dcontext == NULL) dcontext = get_thread_private_dcontext(); ASSERT(dcontext != NULL); #if defined(X86) && defined(LINUX) thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; bool has_restorer = sig_has_restorer(info, sig); # ifdef DEBUG uint level = 3; # if !defined(HAVE_MEMINFO) /* avoid logging every single TRY probe fault */ if (!dynamo_initialized) level = 5; # endif # endif if (has_restorer && for_app) f_new->pretcode = (char *)info->app_sigaction[sig]->restorer; else { # ifdef VMX86_SERVER /* PR 404712: skip kernel's restorer code */ if (for_app) f_new->pretcode = (char *)dynamorio_sigreturn; # else # ifdef X64 ASSERT(!for_app || doing_detach); /* detach uses a frame to go native */ # else /* only point at retcode if old one was -- with newer OS, points at * vsyscall page and there is no restorer, yet stack restorer code left * there for gdb compatibility */ if (f_old->pretcode == f_old->retcode) f_new->pretcode = f_new->retcode; /* else, pointing at vsyscall, or we set it to dynamorio_sigreturn in * master_signal_handler */ LOG(THREAD, LOG_ASYNCH, level, "\tleaving pretcode with old value\n"); # endif # endif } # ifndef X64 f_new->pinfo = &(f_new->info); f_new->puc = &(f_new->uc); # endif if (f_old->uc.uc_mcontext.fpstate != NULL) { uint frame_size = get_app_frame_size(info, sig); byte *frame_end = ((byte *)f_new) + frame_size; byte *tgt = (byte *)ALIGN_FORWARD(frame_end, XSTATE_ALIGNMENT); ASSERT(tgt - frame_end <= signal_frame_extra_size(true)); memcpy(tgt, f_old->uc.uc_mcontext.fpstate, sizeof(kernel_fpstate_t)); f_new->uc.uc_mcontext.fpstate = (kernel_fpstate_t *)tgt; if (YMM_ENABLED()) { kernel_xstate_t *xstate_new = (kernel_xstate_t *)tgt; kernel_xstate_t *xstate_old = (kernel_xstate_t *)f_old->uc.uc_mcontext.fpstate; memcpy(&xstate_new->xstate_hdr, &xstate_old->xstate_hdr, sizeof(xstate_new->xstate_hdr)); memcpy(&xstate_new->ymmh, &xstate_old->ymmh, sizeof(xstate_new->ymmh)); } LOG(THREAD, LOG_ASYNCH, level + 1, "\tfpstate old=" PFX " new=" PFX "\n", f_old->uc.uc_mcontext.fpstate, f_new->uc.uc_mcontext.fpstate); } else { /* if fpstate is not set up, we're delivering signal immediately, * and we shouldn't need an fpstate since DR code won't modify it; * only if we delayed will we need it, and when delaying we make * room and set up the pointer in copy_frame_to_pending. * xref i#641. */ LOG(THREAD, LOG_ASYNCH, level + 1, "\tno fpstate needed\n"); } LOG(THREAD, LOG_ASYNCH, level, "\tretaddr = " PFX "\n", f_new->pretcode); # ifdef RETURN_AFTER_CALL info->signal_restorer_retaddr = (app_pc)f_new->pretcode; # endif /* 32-bit kernel copies to aligned buf first */ IF_X64(ASSERT(ALIGNED(f_new->uc.uc_mcontext.fpstate, 16))); #elif defined(MACOS) # ifndef X64 f_new->pinfo = &(f_new->info); f_new->puc = &(f_new->uc); # endif f_new->puc->uc_mcontext = (IF_X64_ELSE(_STRUCT_MCONTEXT64, _STRUCT_MCONTEXT32) *)&f_new->mc; LOG(THREAD, LOG_ASYNCH, 3, "\tf_new=" PFX ", &handler=" PFX "\n", f_new, &f_new->handler); ASSERT(!for_app || ALIGNED(&f_new->handler, 16)); #endif /* X86 && LINUX */ } /* Only operates on rt frames, so call before converting to plain. * Must be called *after* translating the sigcontext. */ static void fixup_siginfo(dcontext_t *dcontext, int sig, sigframe_rt_t *frame) { /* For some signals, si_addr is a PC which we must translate. 
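     *
     * Example (illustrative): a SIGTRAP raised in the code cache carries the
     * cache pc; once translate_sigcontext() has set SC_XIP to the app pc, we
     * mirror it into si_addr so that an app handler such as
     *
     *   void handler(int sig, siginfo_t *si, void *ucxt) {
     *       fprintf(stderr, "fault pc %p\n", si->si_addr);
     *   }
     *
     * observes the application address rather than a cache address.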
*/ if (sig != SIGILL && sig != SIGTRAP && sig != SIGFPE) return; /* nothing to do */ sigcontext_t *sc = get_sigcontext_from_rt_frame(frame); kernel_siginfo_t *siginfo = SIGINFO_FROM_RT_FRAME(frame); LOG(THREAD, LOG_ASYNCH, 3, "%s: updating si_addr from " PFX " to " PFX "\n", __FUNCTION__, siginfo->si_addr, sc->SC_XIP); siginfo->si_addr = (void *)sc->SC_XIP; #ifdef LINUX siginfo->si_addr_lsb = sc->SC_XIP & 0x1; #endif } static void memcpy_rt_frame(sigframe_rt_t *frame, byte *dst, bool from_pending) { #if defined(MACOS) && !defined(X64) if (!from_pending) { /* The kernel puts padding in the middle. We collapse that padding here * and re-align when we copy to the app stack. * We should not reference fields from mc onward in what the kernel put * on the stack, as our sigframe_rt_t layout does not match the kernel's * variable mid-struct padding. */ sigcontext_t *sc = SIGCXT_FROM_UCXT(frame->puc); memcpy(dst, frame, offsetof(sigframe_rt_t, puc) + sizeof(frame->puc)); memcpy(&((sigframe_rt_t *)dst)->mc, sc, sizeof(sigframe_rt_t) - offsetof(sigframe_rt_t, mc)); return; } #endif memcpy(dst, frame, sizeof(sigframe_rt_t)); } /* Copies frame to sp. * PR 304708: we now leave in rt form right up until we copy to the * app stack, so that we can deliver to a client at a safe spot * in rt form, so this routine now converts to a plain frame if necessary. * If no restorer, touches up pretcode * (and if rt_frame, touches up pinfo and puc) * Also touches up fpstate pointer */ static void copy_frame_to_stack(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, byte *sp, bool from_pending) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; bool rtframe = IS_RT_FOR_APP(info, sig); uint frame_size = get_app_frame_size(info, sig); #if defined(LINUX) && defined(X86_32) bool has_restorer = sig_has_restorer(info, sig); #endif byte *flush_pc; bool stack_unwritable = false; uint size = frame_size; #if defined(LINUX) && defined(X86) sigcontext_t *sc = get_sigcontext_from_rt_frame(frame); size += (sc->fpstate == NULL ? 0 : signal_frame_extra_size(true)); #endif /* LINUX && X86 */ LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_stack: rt=%d, src=" PFX ", sp=" PFX "\n", rtframe, frame, sp); fixup_siginfo(dcontext, sig, frame); /* We avoid querying memory as it incurs global contended locks. */ flush_pc = is_executable_area_writable_overlap(sp, sp + size); if (flush_pc != NULL) { LOG(THREAD, LOG_ASYNCH, 2, "\tcopy_frame_to_stack: part of stack is unwritable-by-us @" PFX "\n", flush_pc); flush_fragments_and_remove_region(dcontext, flush_pc, sp + size - flush_pc, false /* don't own initexit_lock */, false /* keep futures */); } TRY_EXCEPT(dcontext, /* try */ { if (rtframe) { ASSERT(frame_size == sizeof(*frame)); memcpy_rt_frame(frame, sp, from_pending); } IF_NOT_X64( IF_LINUX(else convert_frame_to_nonrt(dcontext, sig, frame, (sigframe_plain_t *)sp);)); }, /* except */ { stack_unwritable = true; }); if (stack_unwritable) { /* Override the no-nested check in record_pending_signal(): it's ok b/c * receive_pending_signal() calls to here at a consistent point, * and we won't return there. */ info->nested_pending_ok = true; /* Just throw away this signal and deliver SIGSEGV instead with the * same sigcontext, like the kernel does. 
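         * (The kernel's analogous path force-delivers SIGSEGV, typically via
         * the default action, when it cannot write the frame to the handler's
         * stack.)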
*/ free_pending_signal(info, sig); os_forge_exception(0, UNREADABLE_MEMORY_EXECUTION_EXCEPTION); ASSERT_NOT_REACHED(); } kernel_sigset_t *mask_to_restore = NULL; if (info->pre_syscall_app_sigprocmask_valid) { mask_to_restore = &info->pre_syscall_app_sigprocmask; info->pre_syscall_app_sigprocmask_valid = false; } else { mask_to_restore = &info->app_sigblocked; } /* if !has_restorer we do NOT add the restorer code to the exec list here, * to avoid removal problems (if handler never returns) and consistency problems * (would have to mark as selfmod right now if on stack). * for PROGRAM_SHEPHERDING we recognize as a pattern, and for consistency we * allow entire region once try to execute -- not a performance worry since should * very rarely be on the stack: should either be libc restorer code or with recent * OS in rx vsyscall page. */ /* fix up pretcode, pinfo, puc, fpstate */ if (rtframe) { sigframe_rt_t *f_new = (sigframe_rt_t *)sp; fixup_rtframe_pointers(dcontext, sig, frame, f_new, true /*for app*/); #ifdef HAVE_SIGALTSTACK /* Make sure the frame's sigstack reflects the app stack, both for transparency * of the app examining it and for correctness if we detach mid-handler. */ LOG(THREAD, LOG_ASYNCH, 3, "updated uc_stack @" PFX " to " PFX "\n", &f_new->uc.uc_stack, info->app_sigstack.ss_sp); f_new->uc.uc_stack = info->app_sigstack; #endif /* Store the prior mask, for restoring in sigreturn. */ memcpy(&f_new->uc.uc_sigmask, mask_to_restore, sizeof(info->app_sigblocked)); } else { #ifdef X64 ASSERT_NOT_REACHED(); #endif #if defined(LINUX) && !defined(X64) sigframe_plain_t *f_new = (sigframe_plain_t *)sp; # ifdef X86 # ifndef VMX86_SERVER sigframe_plain_t *f_old = (sigframe_plain_t *)frame; # endif if (has_restorer) f_new->pretcode = (char *)info->app_sigaction[sig]->restorer; else { # ifdef VMX86_SERVER /* PR 404712: skip kernel's restorer code */ f_new->pretcode = (char *)dynamorio_nonrt_sigreturn; # else /* see comments in rt case above */ if (f_old->pretcode == f_old->retcode) f_new->pretcode = f_new->retcode; else { /* whether we set to dynamorio_sigreturn in master_signal_handler * or it's still vsyscall page, we have to convert to non-rt */ f_new->pretcode = (char *)dynamorio_nonrt_sigreturn; } /* else, pointing at vsyscall most likely */ LOG(THREAD, LOG_ASYNCH, 3, "\tleaving pretcode with old value\n"); # endif } /* convert_frame_to_nonrt*() should have updated fpstate pointer. * The inlined fpstate is no longer used on new kernels, and we do that * as well on older kernels. */ ASSERT(f_new->sc.fpstate != &f_new->fpstate); /* 32-bit kernel copies to aligned buf so no assert on fpstate alignment */ LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = " PFX "\n", f_new->pretcode); /* There is no stored alt stack in a plain frame to update. */ # ifdef RETURN_AFTER_CALL info->signal_restorer_retaddr = (app_pc)f_new->pretcode; # endif # endif /* X86 */ /* Store the prior mask, for restoring in sigreturn. */ convert_rt_mask_to_nonrt(f_new, mask_to_restore); #endif /* LINUX && !X64 */ } #ifdef MACOS /* Update handler field, which is passed to the libc trampoline, to app */ ASSERT(info->app_sigaction[sig] != NULL); ((sigframe_rt_t *)sp)->handler = (app_pc)info->app_sigaction[sig]->handler; #endif } /* Copies frame to pending slot. * PR 304708: we now leave in rt form right up until we copy to the * app stack, so that we can deliver to a client at a safe spot * in rt form. 
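 * Staying in rt form means each pending slot holds the full kernel ucontext,
 * so a client can later be handed a complete context; any conversion to a
 * plain (non-rt) frame is deferred to copy_frame_to_stack.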
 */
static void
copy_frame_to_pending(dcontext_t *dcontext, int sig,
                      sigframe_rt_t *frame _IF_CLIENT(byte *access_address))
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    sigframe_rt_t *dst = &(info->sigpending[sig]->rt_frame);
    memcpy_rt_frame(frame, (byte *)dst, false /*!already pending*/);
#if defined(LINUX) && defined(X86)
    /* For lazy fpstate, it's possible there was no fpstate when the kernel
     * sent us the frame, but in between then and now the app executed some
     * fp or xmm/ymm instrs.  Today we always add fpstate just in case.
     * XXX i#641 optimization: track whether any fp/xmm/ymm
     * instrs happened and avoid this.
     */
    /* we'll fill in updated fpstate at delivery time, but we go ahead and
     * copy now in case our own retrieval somehow misses some fields */
    if (frame->uc.uc_mcontext.fpstate != NULL) {
        memcpy(&info->sigpending[sig]->xstate, frame->uc.uc_mcontext.fpstate,
               /* XXX: assuming full xstate if avx is enabled */
               signal_frame_extra_size(false));
    }
    /* we must set the pointer now so that later save_fpstate, etc. work */
    dst->uc.uc_mcontext.fpstate = (kernel_fpstate_t *)&info->sigpending[sig]->xstate;
#endif /* LINUX && X86 */
#ifdef CLIENT_INTERFACE
    info->sigpending[sig]->access_address = access_address;
#endif
    info->sigpending[sig]->use_sigcontext = false;
#ifdef MACOS
    /* We rely on puc to find sc so we have to fix it up */
    fixup_rtframe_pointers(dcontext, sig, frame, dst, false /*!for app*/);
#endif
    LOG(THREAD, LOG_ASYNCH, 3, "copy_frame_to_pending from " PFX "\n", frame);
    DOLOG(3, LOG_ASYNCH, {
        LOG(THREAD, LOG_ASYNCH, 3, "sigcontext:\n");
        dump_sigcontext(dcontext, get_sigcontext_from_rt_frame(dst));
    });
}

/**** real work ***********************************************/

/* transfer control from signal handler to fcache return routine */
static void
transfer_from_sig_handler_to_fcache_return(dcontext_t *dcontext, kernel_ucontext_t *uc,
                                           sigcontext_t *sc_interrupted, int sig,
                                           app_pc next_pc, linkstub_t *last_exit,
                                           bool is_kernel_xfer)
{
    sigcontext_t *sc = SIGCXT_FROM_UCXT(uc);
#ifdef CLIENT_INTERFACE
    if (is_kernel_xfer) {
        sig_full_cxt_t sc_interrupted_full = { sc_interrupted, NULL /*not provided*/ };
        sig_full_cxt_t sc_full;
        sig_full_initialize(&sc_full, uc);
        sc->SC_XIP = (ptr_uint_t)next_pc;
        if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_interrupted_full,
                                   NULL, NULL, next_pc, sc->SC_XSP, sc_full, NULL, sig))
            next_pc = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP);
    }
#endif
    dcontext->next_tag = canonicalize_pc_target(dcontext, next_pc);
    IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL));
    /* Set our sigreturn context to point to fcache_return!
     * Then we'll go back through kernel, appear in fcache_return,
     * and go through dispatch & interp, without messing up dynamo stack.
     * Note that even if this is a write in the shared cache, we
     * still go to the private fcache_return for simplicity.
     */
    sc->SC_XIP = (ptr_uint_t)fcache_return_routine(dcontext);
#ifdef AARCHXX
    /* We do not have to set dr_reg_stolen in dcontext's mcontext here
     * because dcontext's mcontext is stale and we used the mcontext
     * created from recreate_app_state_internal with the original sigcontext.
     */
    /* We restore dr_reg_stolen's app value in recreate_app_state_internal,
     * so now we need to set dr_reg_stolen to hold DR's TLS before sigreturn
     * from DR's handler.
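     * Otherwise the gencode we sigreturn into would read DR's TLS through the
     * app's value left in the stolen register.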
*/ ASSERT(get_sigcxt_stolen_reg(sc) != (reg_t)*get_dr_tls_base_addr()); set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr()); # ifndef AARCH64 /* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */ set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE); # endif #endif #if defined(X64) || defined(ARM) /* x64 always uses shared gencode */ get_local_state_extended()->spill_space.IF_X86_ELSE(xax, r0) = sc->IF_X86_ELSE(SC_XAX, SC_R0); # ifdef AARCH64 /* X1 needs to be spilled because of br x1 in exit stubs. */ get_local_state_extended()->spill_space.r1 = sc->SC_R1; # endif #else get_mcontext(dcontext)->IF_X86_ELSE(xax, r0) = sc->IF_X86_ELSE(SC_XAX, SC_R0); #endif LOG(THREAD, LOG_ASYNCH, 2, "\tsaved xax " PFX "\n", sc->IF_X86_ELSE(SC_XAX, SC_R0)); sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t)last_exit; LOG(THREAD, LOG_ASYNCH, 2, "\tset next_tag to " PFX ", resuming in fcache_return\n", next_pc); LOG(THREAD, LOG_ASYNCH, 3, "transfer_from_sig_handler_to_fcache_return\n"); DOLOG(3, LOG_ASYNCH, { LOG(THREAD, LOG_ASYNCH, 3, "sigcontext @" PFX ":\n", sc); dump_sigcontext(dcontext, sc); }); } #ifdef CLIENT_INTERFACE static dr_signal_action_t send_signal_to_client(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, sigcontext_t *raw_sc, byte *access_address, bool blocked, fragment_t *fragment) { kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame); dr_siginfo_t si; dr_signal_action_t action; /* XXX #1615: we need a full ucontext to store pre-xl8 simd values. * Right now we share the same simd values with post-xl8. */ sig_full_cxt_t raw_sc_full; sig_full_initialize(&raw_sc_full, uc); raw_sc_full.sc = raw_sc; if (!dr_signal_hook_exists()) return DR_SIGNAL_DELIVER; LOG(THREAD, LOG_ASYNCH, 2, "sending signal to client\n"); si.sig = sig; si.drcontext = (void *)dcontext; /* It's safe to allocate since we do not send signals that interrupt DR. * With priv_mcontext_t x2 that's a little big for stack alloc. */ si.mcontext = heap_alloc(dcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER)); si.raw_mcontext = heap_alloc(dcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER)); dr_mcontext_init(si.mcontext); dr_mcontext_init(si.raw_mcontext); /* i#207: fragment tag and fcache start pc on fault. */ si.fault_fragment_info.tag = NULL; si.fault_fragment_info.cache_start_pc = NULL; /* i#182/PR 449996: we provide the pre-translation context */ if (raw_sc != NULL) { fragment_t wrapper; si.raw_mcontext_valid = true; sigcontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.raw_mcontext), &raw_sc_full, si.raw_mcontext->flags); /* i#207: fragment tag and fcache start pc on fault. */ /* FIXME: we should avoid the fragment_pclookup since it is expensive * and since we already did the work of a lookup when translating */ if (fragment == NULL) fragment = fragment_pclookup(dcontext, si.raw_mcontext->pc, &wrapper); if (fragment != NULL && !hide_tag_from_client(fragment->tag)) { si.fault_fragment_info.tag = fragment->tag; si.fault_fragment_info.cache_start_pc = FCACHE_ENTRY_PC(fragment); si.fault_fragment_info.is_trace = TEST(FRAG_IS_TRACE, fragment->flags); si.fault_fragment_info.app_code_consistent = !TESTANY(FRAG_WAS_DELETED | FRAG_SELFMOD_SANDBOXED, fragment->flags); } } else si.raw_mcontext_valid = false; /* The client has no way to calculate this when using * instrumentation that deliberately faults (to shift a rare event * out of the fastpath) so we provide it. When raw_mcontext is * available the client can calculate it, but we provide it as a * convenience anyway. 
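     * (The address itself is normally produced by compute_memory_target(),
     * which decodes the faulting instruction and cross-checks the kernel's
     * si_addr.)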
*/ si.access_address = access_address; si.blocked = blocked; ucontext_to_mcontext(dr_mcontext_as_priv_mcontext(si.mcontext), uc); /* We disallow the client calling dr_redirect_execution(), so we * will not leak si */ action = instrument_signal(dcontext, &si); if (action == DR_SIGNAL_DELIVER || action == DR_SIGNAL_REDIRECT) { /* propagate client changes */ CLIENT_ASSERT(si.mcontext->flags == DR_MC_ALL, "signal mcontext flags cannot be changed"); mcontext_to_ucontext(uc, dr_mcontext_as_priv_mcontext(si.mcontext)); } else if (action == DR_SIGNAL_SUPPRESS && raw_sc != NULL) { /* propagate client changes */ CLIENT_ASSERT(si.raw_mcontext->flags == DR_MC_ALL, "signal mcontext flags cannot be changed"); mcontext_to_sigcontext(&raw_sc_full, dr_mcontext_as_priv_mcontext(si.raw_mcontext), si.raw_mcontext->flags); } heap_free(dcontext, si.mcontext, sizeof(*si.mcontext) HEAPACCT(ACCT_OTHER)); heap_free(dcontext, si.raw_mcontext, sizeof(*si.raw_mcontext) HEAPACCT(ACCT_OTHER)); return action; } /* Returns false if caller should exit */ static bool handle_client_action_from_cache(dcontext_t *dcontext, int sig, dr_signal_action_t action, sigframe_rt_t *our_frame, sigcontext_t *sc_orig, sigcontext_t *sc_interrupted, bool blocked) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame); sigcontext_t *sc = SIGCXT_FROM_UCXT(uc); /* in order to pass to the client, we come all the way here for signals * the app has no handler for */ if (action == DR_SIGNAL_REDIRECT) { /* send_signal_to_client copied mcontext into our * master_signal_handler frame, so we set up for fcache_return w/ * our frame's state */ transfer_from_sig_handler_to_fcache_return( dcontext, uc, sc_interrupted, sig, (app_pc)sc->SC_XIP, (linkstub_t *)get_asynch_linkstub(), true); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } return false; } else if (action == DR_SIGNAL_SUPPRESS || (!blocked && info->app_sigaction[sig] != NULL && info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) { LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n", (action == DR_SIGNAL_SUPPRESS) ? "client suppressing signal" : "app signal handler is SIG_IGN"); /* restore original (untranslated) sc */ *get_sigcontext_from_rt_frame(our_frame) = *sc_orig; return false; } else if (!blocked && /* no BYPASS for blocked */ (action == DR_SIGNAL_BYPASS || (info->app_sigaction[sig] == NULL || info->app_sigaction[sig]->handler == (handler_t)SIG_DFL))) { LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n", (action == DR_SIGNAL_BYPASS) ? "client forcing default" : "app signal handler is SIG_DFL"); if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig, false)) { /* if we haven't terminated, restore original (untranslated) sc * on request. 
*/ *get_sigcontext_from_rt_frame(our_frame) = *sc_orig; LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xsp=" PFX ", xip=" PFX "\n", __FUNCTION__, get_sigcontext_from_rt_frame(our_frame)->SC_XSP, get_sigcontext_from_rt_frame(our_frame)->SC_XIP); } return false; } CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value"); return true; } #endif static void abort_on_fault(dcontext_t *dcontext, uint dumpcore_flag, app_pc pc, byte *target, int sig, sigframe_rt_t *frame, const char *prefix, const char *signame, const char *where) { kernel_ucontext_t *ucxt = &frame->uc; sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); bool stack_overflow = (sig == SIGSEGV && is_stack_overflow(dcontext, target)); #if defined(STATIC_LIBRARY) && defined(LINUX) thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; uint orig_dumpcore_flag = dumpcore_flag; if (init_info.app_sigaction != NULL) info = &init_info; /* use init-time handler */ ASSERT(info->app_sigaction != NULL); #endif const char *fmt = "%s %s at PC " PFX "\n" "Received SIG%s at%s pc " PFX " in thread " TIDFMT "\n" "Base: " PFX "\n" "Registers:" #ifdef X86 "eax=" PFX " ebx=" PFX " ecx=" PFX " edx=" PFX "\n" "\tesi=" PFX " edi=" PFX " esp=" PFX " ebp=" PFX "\n" # ifdef X64 "\tr8 =" PFX " r9 =" PFX " r10=" PFX " r11=" PFX "\n" "\tr12=" PFX " r13=" PFX " r14=" PFX " r15=" PFX "\n" # endif /* X64 */ #elif defined(ARM) # ifndef X64 " r0 =" PFX " r1 =" PFX " r2 =" PFX " r3 =" PFX "\n" "\tr4 =" PFX " r5 =" PFX " r6 =" PFX " r7 =" PFX "\n" "\tr8 =" PFX " r9 =" PFX " r10=" PFX " r11=" PFX "\n" "\tr12=" PFX " r13=" PFX " r14=" PFX " r15=" PFX "\n" # else # error NYI on AArch64 # endif #endif /* X86/ARM */ "\teflags=" PFX; #if defined(STATIC_LIBRARY) && defined(LINUX) /* i#2119: if we're invoking an app handler, disable a fatal coredump. */ if (INTERNAL_OPTION(invoke_app_on_crash) && info->app_sigaction[sig] != NULL && IS_RT_FOR_APP(info, sig) && TEST(dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) && !DYNAMO_OPTION(live_dump)) dumpcore_flag = 0; #endif report_dynamorio_problem( dcontext, dumpcore_flag | (stack_overflow ? DUMPCORE_STACK_OVERFLOW : 0), pc, (app_pc)sc->SC_FP, fmt, prefix, stack_overflow ? STACK_OVERFLOW_NAME : CRASH_NAME, pc, signame, where, pc, get_thread_id(), get_dynamorio_dll_start(), #ifdef X86 sc->SC_XAX, sc->SC_XBX, sc->SC_XCX, sc->SC_XDX, sc->SC_XSI, sc->SC_XDI, sc->SC_XSP, sc->SC_XBP, # ifdef X64 sc->SC_FIELD(r8), sc->SC_FIELD(r9), sc->SC_FIELD(r10), sc->SC_FIELD(r11), sc->SC_FIELD(r12), sc->SC_FIELD(r13), sc->SC_FIELD(r14), sc->SC_FIELD(r15), # endif /* X86 */ #elif defined(ARM) # ifndef X64 sc->SC_FIELD(arm_r0), sc->SC_FIELD(arm_r1), sc->SC_FIELD(arm_r2), sc->SC_FIELD(arm_r3), sc->SC_FIELD(arm_r4), sc->SC_FIELD(arm_r5), sc->SC_FIELD(arm_r6), sc->SC_FIELD(arm_r7), sc->SC_FIELD(arm_r8), sc->SC_FIELD(arm_r9), sc->SC_FIELD(arm_r10), sc->SC_FIELD(arm_fp), sc->SC_FIELD(arm_ip), sc->SC_FIELD(arm_sp), sc->SC_FIELD(arm_lr), sc->SC_FIELD(arm_pc), # else # error NYI on AArch64 # endif /* X64 */ #endif /* X86/ARM */ sc->SC_XFLAGS); #if defined(STATIC_LIBRARY) && defined(LINUX) /* i#2119: For static DR, the surrounding app's handler may well be * safe to invoke even when DR state is messed up: it's worth a try, as it * likely has useful reporting features for users of the app. * We limit to Linux and RT for simplicity: it can be expanded later if static * library use expands. 
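     * Note that we invoke the handler directly with the SA_SIGINFO triple
     * (sig, siginfo, ucontext), which is why we insist on IS_RT_FOR_APP below.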
*/ if (INTERNAL_OPTION(invoke_app_on_crash) && info->app_sigaction[sig] != NULL && IS_RT_FOR_APP(info, sig)) { SYSLOG(SYSLOG_WARNING, INVOKING_APP_HANDLER, 2, get_application_name(), get_application_pid()); (*info->app_sigaction[sig]->handler)(sig, &frame->info, ucxt); /* If the app handler didn't terminate, now get a fatal core. */ if (TEST(orig_dumpcore_flag, DYNAMO_OPTION(dumpcore_mask)) && !DYNAMO_OPTION(live_dump)) os_dump_core("post-app-handler attempt at core dump"); } #endif os_terminate(dcontext, TERMINATE_PROCESS); ASSERT_NOT_REACHED(); } static void abort_on_DR_fault(dcontext_t *dcontext, app_pc pc, byte *target, int sig, sigframe_rt_t *frame, const char *signame, const char *where) { abort_on_fault(dcontext, DUMPCORE_INTERNAL_EXCEPTION, pc, target, sig, frame, exception_label_core, signame, where); ASSERT_NOT_REACHED(); } /* Returns whether unlinked or mangled syscall. * Restored in receive_pending_signal. */ static bool unlink_fragment_for_signal(dcontext_t *dcontext, fragment_t *f, byte *pc /*interruption pc*/) { /* We only come here if we interrupted a fragment in the cache, * or interrupted transition gencode (i#2019), * which means that this thread's DR state is safe, and so it * should be ok to acquire a lock. xref PR 596069. * * There is a race where if two threads hit a signal in the same * shared fragment, the first could re-link after the second * un-links but before the second exits, and the second could then * execute the syscall, resulting in arbitrary delay prior to * signal delivery. We don't want to allocate global memory, * but we could use a static array of counters (since should * be small # of interrupted shared fragments at any one time) * used as refcounts so we only unlink when all are done. * Not bothering to implement now: going to live w/ chance of * long signal delays. xref PR 596069. */ bool changed = false; bool waslinking = is_couldbelinking(dcontext); if (!waslinking) enter_couldbelinking(dcontext, NULL, false); /* may not be linked if trace_relink or something */ if (TEST(FRAG_COARSE_GRAIN, f->flags)) { /* XXX PR 213040: we don't support unlinking coarse, so we try * not to come here, but for indirect branch and other spots * where we don't yet support translation (since can't fault) * we end up w/ no bound on delivery... */ } else if (TEST(FRAG_LINKED_OUTGOING, f->flags)) { LOG(THREAD, LOG_ASYNCH, 3, "\tunlinking outgoing for interrupted F%d\n", f->id); SHARED_FLAGS_RECURSIVE_LOCK(f->flags, acquire, change_linking_lock); // Double-check flags to ensure some other thread didn't unlink // while we waited for the change_linking_lock. if (TEST(FRAG_LINKED_OUTGOING, f->flags)) { unlink_fragment_outgoing(dcontext, f); changed = true; } SHARED_FLAGS_RECURSIVE_LOCK(f->flags, release, change_linking_lock); } else { LOG(THREAD, LOG_ASYNCH, 3, "\toutgoing already unlinked for interrupted F%d\n", f->id); } if (TEST(FRAG_HAS_SYSCALL, f->flags)) { /* Syscalls are signal barriers! * Make sure the next syscall (if any) in f is not executed! * instead go back to dispatch right before the syscall */ /* syscall mangling does a bunch of decodes but only one write, * changing the target of a short jmp, which is atomic * since a one-byte write, so we don't need the change_linking_lock. 
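         * (On x86 a short jmp's target is a single rel8 byte, so the
         * retargeting store can never be observed half-written by another
         * thread.)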
*/ if (mangle_syscall_code(dcontext, f, pc, false /*do not skip exit cti*/)) changed = true; } if (!waslinking) enter_nolinking(dcontext, NULL, false); return changed; } static void relink_interrupted_fragment(dcontext_t *dcontext, thread_sig_info_t *info) { if (info->interrupted == NULL) return; /* i#2066: if we were building a trace, it may already be re-linked */ if (!TEST(FRAG_LINKED_OUTGOING, info->interrupted->flags)) { LOG(THREAD, LOG_ASYNCH, 3, "\tre-linking outgoing for interrupted F%d\n", info->interrupted->id); SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, acquire, change_linking_lock); /* Double-check flags to ensure some other thread didn't link * while we waited for the change_linking_lock. */ if (!TEST(FRAG_LINKED_OUTGOING, info->interrupted->flags)) { link_fragment_outgoing(dcontext, info->interrupted, false); } SHARED_FLAGS_RECURSIVE_LOCK(info->interrupted->flags, release, change_linking_lock); } if (TEST(FRAG_HAS_SYSCALL, info->interrupted->flags)) { /* restore syscall (they're a barrier to signals, so signal * handler has cur frag exit before it does a syscall) */ if (info->interrupted_pc != NULL) { mangle_syscall_code(dcontext, info->interrupted, info->interrupted_pc, true /*skip exit cti*/); } } info->interrupted = NULL; info->interrupted_pc = NULL; } static bool interrupted_inlined_syscall(dcontext_t *dcontext, fragment_t *f, byte *pc /*interruption pc*/) { bool pre_or_post_syscall = false; if (TEST(FRAG_HAS_SYSCALL, f->flags)) { /* PR 596147: if the thread is currently in an inlined * syscall when a signal comes in, we can't delay and bound the * delivery time: we need to deliver now. Should decode * backward and see if syscall. We assume our translation of * the interruption state is fine to re-start: i.e., the syscall * is complete if kernel has pc at post-syscall point, and * kernel set EINTR in eax if necessary. */ /* Interrupted fcache, so ok to alloc memory for decode */ instr_t instr; byte *nxt_pc; instr_init(dcontext, &instr); nxt_pc = decode(dcontext, pc, &instr); if (nxt_pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr)) { /* pre-syscall but post-jmp so can't skip syscall */ pre_or_post_syscall = true; } else { size_t syslen = syscall_instr_length(FRAG_ISA_MODE(f->flags)); instr_reset(dcontext, &instr); nxt_pc = decode(dcontext, pc - syslen, &instr); if (nxt_pc != NULL && instr_valid(&instr) && instr_is_syscall(&instr)) { #if defined(X86) && !defined(MACOS) /* decoding backward so check for exit cti jmp prior * to syscall to ensure no mismatch */ instr_reset(dcontext, &instr); nxt_pc = decode(dcontext, pc - syslen - JMP_LONG_LENGTH, &instr); if (nxt_pc != NULL && instr_valid(&instr) && instr_get_opcode(&instr) == OP_jmp) { /* post-inlined-syscall */ pre_or_post_syscall = true; } #else /* On Mac and ARM we have some TLS spills in between so we just * trust that this is a syscall (esp on ARM w/ aligned instrs). */ pre_or_post_syscall = true; #endif } } instr_free(dcontext, &instr); } return pre_or_post_syscall; } /* i#1145: auto-restart syscalls interrupted by signals */ static bool adjust_syscall_for_restart(dcontext_t *dcontext, thread_sig_info_t *info, int sig, sigcontext_t *sc, fragment_t *f, reg_t orig_retval_reg) { byte *pc = (byte *)sc->SC_XIP; int sys_inst_len; if (sc->IF_X86_ELSE(SC_XAX, SC_R0) != -EINTR) { /* The syscall succeeded, so no reason to interrupt. * Some syscalls succeed on a signal coming in. * E.g., SYS_wait4 on SIGCHLD, or reading from a slow device. * XXX: Now that we pass SA_RESTART we should never get here? 
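     * (Even with SA_RESTART the kernel still returns -EINTR for a handful of
     * calls it refuses to restart, e.g. pause or select-style timeouts, so
     * this path is likely still reachable.)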
*/ return false; } /* Don't restart if the app's handler says not to */ if (info->app_sigaction[sig] != NULL && !TEST(SA_RESTART, info->app_sigaction[sig]->flags)) { return false; } /* XXX i#1145: some syscalls are never restarted when interrupted by a signal. * We check those that are simple to distinguish below, but not all are. We have * this under an option so it can be disabled if necessary. */ if (!DYNAMO_OPTION(restart_syscalls)) return false; /* Now that we use SA_RESTART we rely on that and ignore our own * inaccurate check sysnum_is_not_restartable(sysnum). * SA_RESTART also means we can just be passed in the register value to restore. */ LOG(THREAD, LOG_ASYNCH, 2, "%s: restored xax/r0 to %ld\n", __FUNCTION__, orig_retval_reg); #ifdef X86 sc->SC_XAX = orig_retval_reg; #elif defined(AARCHXX) sc->SC_R0 = orig_retval_reg; #else # error NYI #endif /* Now adjust the pc to point at the syscall instruction instead of after it, * so when we resume we'll go back to the syscall. * Adjusting solves transparency as well: natively the kernel adjusts * the pc before setting up the signal frame. * We don't pass in the post-syscall pc provided by the kernel because * we want the app pc, not the raw pc. */ dr_isa_mode_t isa_mode; if (is_after_syscall_address(dcontext, pc) || pc == vsyscall_sysenter_return_pc) { isa_mode = dr_get_isa_mode(dcontext); } else { /* We're going to walk back in the fragment, not gencode */ ASSERT(f != NULL); isa_mode = FRAG_ISA_MODE(f->flags); } sys_inst_len = syscall_instr_length(isa_mode); if (pc == vsyscall_sysenter_return_pc) { #ifdef X86 sc->SC_XIP = (ptr_uint_t)(vsyscall_syscall_end_pc - sys_inst_len); /* To restart sysenter we must re-copy xsp into xbp, as xbp is * clobbered by the kernel. * XXX: The kernel points at the int 0x80 in vsyscall on a restart * and so doesn't have to do this: should we do that too? If so we'll * have to avoid interpreting our own hook which is right after the * int 0x80. */ sc->SC_XBP = sc->SC_XSP; #else ASSERT_NOT_REACHED(); #endif } else if (is_after_syscall_address(dcontext, pc)) { /* We're at do_syscall: point at app syscall instr. We want an app * address b/c this signal will be delayed and the delivery will use * a direct app context: no translation from the cache. * The caller sets info->sigpending[sig]->use_sigcontext for us. */ sc->SC_XIP = (ptr_uint_t)(dcontext->asynch_target - sys_inst_len); DODEBUG({ instr_t instr; dr_isa_mode_t old_mode; dr_set_isa_mode(dcontext, isa_mode, &old_mode); instr_init(dcontext, &instr); ASSERT(decode(dcontext, (app_pc)sc->SC_XIP, &instr) != NULL && instr_is_syscall(&instr)); instr_free(dcontext, &instr); dr_set_isa_mode(dcontext, old_mode, NULL); }); } else { ASSERT_NOT_REACHED(); /* Inlined syscalls no longer come here. */ } LOG(THREAD, LOG_ASYNCH, 2, "%s: sigreturn pc is now " PFX "\n", __FUNCTION__, sc->SC_XIP); return true; } /* XXX: Better to get this code inside arch/ but we'd have to convert to an mcontext * which seems overkill. */ static fragment_t * find_next_fragment_from_gencode(dcontext_t *dcontext, sigcontext_t *sc) { fragment_t *f = NULL; fragment_t wrapper; byte *pc = (byte *)sc->SC_XIP; if (in_clean_call_save(dcontext, pc) || in_clean_call_restore(dcontext, pc)) { #ifdef AARCHXX f = fragment_pclookup(dcontext, (cache_pc)sc->SC_LR, &wrapper); #elif defined(X86) cache_pc retaddr = NULL; /* Get the retaddr. We assume this is the adjustment used by * insert_out_of_line_context_switch(). 
*/ byte *ra_slot = dcontext->dstack - get_clean_call_switch_stack_size() - sizeof(retaddr); /* The extra x86 slot is only there for save. */ if (in_clean_call_save(dcontext, pc)) ra_slot -= get_clean_call_temp_stack_size(); if (safe_read(ra_slot, sizeof(retaddr), &retaddr)) f = fragment_pclookup(dcontext, retaddr, &wrapper); #else # error Unsupported arch. #endif } else if (in_indirect_branch_lookup_code(dcontext, pc)) { /* Try to find the target if the signal arrived in the IBL. * We could try to be a lot more precise by hardcoding the IBL * sequence here but that would make the code less maintainable. * Instead we try the registers that hold the target app address. */ /* First check for the jmp* on the hit path: that is the only place * in the ibl where the target tag is not sitting in a register. */ #if defined(X86) && defined(X64) /* Optimization for the common case of targeting a prefix on x86_64: * ff 61 08 jmp 0x08(%rcx)[8byte] * The tag is in 0x0(%rcx) so we avoid a decode and pclookup. */ if (*pc == 0xff && *(pc + 1) == 0x61 && *(pc + 2) == 0x08) { f = fragment_lookup(dcontext, *(app_pc *)sc->SC_XCX); } #endif if (f == NULL) { instr_t instr; instr_init(dcontext, &instr); decode_cti(dcontext, pc, &instr); if (instr_is_ibl_hit_jump(&instr)) { priv_mcontext_t mc; sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ }; sigcontext_to_mcontext(&mc, &sc_full, DR_MC_INTEGER | DR_MC_CONTROL); byte *target; if (opnd_is_memory_reference(instr_get_target(&instr))) { target = instr_compute_address_priv(&instr, &mc); ASSERT(target != NULL); if (target != NULL) target = *(byte **)target; } else { ASSERT(opnd_is_reg(instr_get_target(&instr))); target = (byte *)reg_get_value_priv( opnd_get_reg(instr_get_target(&instr)), &mc); } ASSERT(target != NULL); if (target != NULL) f = fragment_pclookup(dcontext, target, &wrapper); /* I tried to hit this case running client.cleancallsig in a loop * and while I could on x86 and x86_64 I never did on ARM or * AArch64. We can remove this once someone hits it and it works. */ IF_AARCHXX(ASSERT_NOT_TESTED()); } instr_free(dcontext, &instr); } #ifdef AARCHXX /* The target is in r2 the whole time, w/ or w/o Thumb LSB. */ if (f == NULL && sc->SC_R2 != 0) f = fragment_lookup(dcontext, ENTRY_PC_TO_DECODE_PC(sc->SC_R2)); #elif defined(X86) /* The target is initially in xcx but is then copied to xbx. */ if (f == NULL && sc->SC_XBX != 0) f = fragment_lookup(dcontext, (app_pc)sc->SC_XBX); if (f == NULL && sc->SC_XCX != 0) f = fragment_lookup(dcontext, (app_pc)sc->SC_XCX); #else # error Unsupported arch. #endif } else { /* If in fcache_enter or do_syscall*, we stored the next_tag in asynch_target * in dispatch. But, we need to avoid using the asynch_target for the * fragment we just exited if we're in fcache_return. 
*/ if (dcontext->asynch_target != NULL && !in_fcache_return(dcontext, pc)) f = fragment_lookup(dcontext, dcontext->asynch_target); } return f; } static void record_pending_signal(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt, sigframe_rt_t *frame, bool forged _IF_CLIENT(byte *access_address)) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field; sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); /* XXX #1615: we need a full ucontext to store pre-xl8 simd values */ sigcontext_t sc_orig; byte *pc = (byte *)sc->SC_XIP; byte *xsp = (byte *)sc->SC_XSP; bool receive_now = false; bool blocked = false; bool handled = false; bool at_auto_restart_syscall = false; int syslen = 0; reg_t orig_retval_reg = sc->IF_X86_ELSE(SC_XAX, SC_R0); sigpending_t *pend; fragment_t *f = NULL; fragment_t wrapper; /* We no longer block SUSPEND_SIGNAL (i#184/PR 450670) or SIGSEGV (i#193/PR 287309). * But we can have re-entrancy issues in this routine if the app uses the same * SUSPEND_SIGNAL, or the nested SIGSEGV needs to be sent to the app. The * latter shouldn't happen unless the app sends SIGSEGV via SYS_kill(). */ if (ostd->processing_signal > 0 || /* If we interrupted receive_pending_signal() we can't prepend a new * pending or delete an old b/c we might mess up the state so we * just drop this one: should only happen for alarm signal */ (info->accessing_sigpending && !info->nested_pending_ok && /* we do want to report a crash in receive_pending_signal() */ (can_always_delay[sig] || is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)))) { LOG(THREAD, LOG_ASYNCH, 1, "nested signal %d\n", sig); ASSERT(ostd->processing_signal == 0 || sig == SUSPEND_SIGNAL || sig == SIGSEGV); ASSERT(can_always_delay[sig] || is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)); /* To avoid re-entrant execution of special_heap_alloc() and of * prepending to the pending list we just drop this signal. * FIXME i#194/PR 453996: do better. */ STATS_INC(num_signals_dropped); SYSLOG_INTERNAL_WARNING_ONCE("dropping nested signal"); return; } ostd->processing_signal++; /* no need for atomicity: thread-private */ /* First, check whether blocked, before we restore for sigsuspend (i#1340). */ if (kernel_sigismember(&info->app_sigblocked, sig)) blocked = true; if (info->in_sigsuspend) { /* sigsuspend ends when a signal is received, so restore the * old blocked set */ info->app_sigblocked = info->app_sigblocked_save; info->in_sigsuspend = false; /* update the set to restore to post-signal-delivery */ #ifdef MACOS ucxt->uc_sigmask = *(__darwin_sigset_t *)&info->app_sigblocked; #else ucxt->uc_sigmask = info->app_sigblocked; #endif #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "after sigsuspend, blocked signals are now:\n"); dump_sigset(dcontext, &info->app_sigblocked); } #endif } if (get_at_syscall(dcontext)) syslen = syscall_instr_length(dr_get_isa_mode(dcontext)); if (info->app_sigaction[sig] != NULL && info->app_sigaction[sig]->handler == (handler_t)SIG_IGN /* If a client registered a handler, put this in the queue. * Races between registering, queueing, and delivering are fine. 
*/ IF_CLIENT_INTERFACE(&&!dr_signal_hook_exists())) { LOG(THREAD, LOG_ASYNCH, 3, "record_pending_signal (%d at pc " PFX "): action is SIG_IGN!\n", sig, pc); ostd->processing_signal--; return; } else if (blocked) { /* signal is blocked by app, so just record it, don't receive now */ LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d at pc " PFX "): signal is currently blocked\n", sig, pc); IF_LINUX(handled = notify_signalfd(dcontext, info, sig, frame)); } else if (safe_is_in_fcache(dcontext, pc, xsp)) { LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from cache pc " PFX "\n", sig, pc); if (forged || can_always_delay[sig]) { /* to make translation easier, want to delay if can until dispatch * unlink cur frag, wait for dispatch */ /* check for coarse first to avoid cost of coarse pclookup */ if (get_fcache_coarse_info(pc) != NULL) { /* PR 213040: we can't unlink coarse. If we fail to translate * we'll switch back to delaying, below. */ if (sig_is_alarm_signal(sig) && info->sigpending[sig] != NULL && info->sigpending[sig]->next != NULL && info->skip_alarm_xl8 > 0) { /* Translating coarse fragments is very expensive so we * avoid doing it when we're having trouble keeping up w/ * the alarm frequency (PR 213040), but we make sure we try * every once in a while to avoid unbounded signal delay */ info->skip_alarm_xl8--; STATS_INC(num_signals_coarse_delayed); } else { if (sig_is_alarm_signal(sig)) info->skip_alarm_xl8 = SKIP_ALARM_XL8_MAX; receive_now = true; LOG(THREAD, LOG_ASYNCH, 2, "signal interrupted coarse fragment so delivering now\n"); } } else { f = fragment_pclookup(dcontext, pc, &wrapper); ASSERT(f != NULL); ASSERT(!TEST(FRAG_COARSE_GRAIN, f->flags)); /* checked above */ LOG(THREAD, LOG_ASYNCH, 2, "\tdelaying until exit F%d\n", f->id); if (interrupted_inlined_syscall(dcontext, f, pc)) { /* PR 596147: if delayable signal arrives after syscall-skipping * jmp, either at syscall or post-syscall, we deliver * immediately, since we can't bound the delay */ receive_now = true; LOG(THREAD, LOG_ASYNCH, 2, "signal interrupted pre/post syscall itself so delivering now\n"); /* We don't set at_auto_restart_syscall because we just leave * the SA_RESTART kernel-supplied resumption point: with no * post-syscall handler to worry about we have no need to * change anything. */ } else { /* could get another signal but should be in same fragment */ ASSERT(info->interrupted == NULL || info->interrupted == f); if (info->interrupted != f) { /* Just in case there's a prior, avoid leaving it unlinked. */ relink_interrupted_fragment(dcontext, info); if (unlink_fragment_for_signal(dcontext, f, pc)) { info->interrupted = f; info->interrupted_pc = pc; } else { /* either was unlinked for trace creation, or we got another * signal before exiting cache to handle 1st */ ASSERT(info->interrupted == NULL || info->interrupted == f); } } } } } else { /* the signal interrupted code cache => run handler now! 
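             * A synchronous fault (not forged, not always-delayable) must be
             * delivered at its interruption point: delaying it would mean
             * re-executing the faulting instruction.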
*/ receive_now = true; LOG(THREAD, LOG_ASYNCH, 2, "\tnot certain can delay so handling now\n"); } } else if (in_generated_routine(dcontext, pc) || /* XXX: should also check fine stubs */ safe_is_in_coarse_stubs(dcontext, pc, xsp)) { /* Assumption: dynamo errors have been caught already inside * the master_signal_handler, thus any error in a generated routine * is an asynch signal that can be delayed */ LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from gen routine or stub " PFX "\n", sig, pc); if (get_at_syscall(dcontext)) { /* i#1206: the syscall was interrupted, so we can go back to dispatch * and don't need to receive it now (which complicates post-syscall handling) * w/o any extra delay. */ /* i#2659: we now use SA_RESTART to handle interrupting native * auto-restart syscalls. That means we have to adjust do_syscall * interruption to give us control so we can deliver the signal. Due to * needing to run post-syscall handlers (we don't want to get into nested * dcontexts like on Windows) it's simplest to go back to dispatch, which * is most easily done by emulating the non-SA_RESTART behavior. * XXX: This all seems backward: we should revisit this model and see if * we can get rid of this emulation and the auto-restart emulation. */ /* The get_at_syscall() check above distinguishes from just having * arrived at the syscall instr, but with SA_RESTART we can't distinguish * not-yet-executed-syscall from syscall-was-interrupted-in-the-kernel. * This matters for sigreturn (i#2995), whose asynch_target points somewhere * other than right after the syscall, so we exclude it (it can't be * interrupted so we know we haven't executed it yet). */ if (is_after_syscall_address(dcontext, pc + syslen) && !is_sigreturn_syscall_number(sc->SC_SYSNUM_REG)) { LOG(THREAD, LOG_ASYNCH, 2, "Adjusting interrupted auto-restart syscall from " PFX " to " PFX "\n", pc, pc + syslen); at_auto_restart_syscall = true; sc->SC_XIP += syslen; sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR; pc = (byte *)sc->SC_XIP; } } /* This could come from another thread's SYS_kill (via our gen do_syscall) */ DOLOG(1, LOG_ASYNCH, { if (!is_after_syscall_address(dcontext, pc) && !forged && !can_always_delay[sig]) { LOG(THREAD, LOG_ASYNCH, 1, "WARNING: signal %d in gen routine: may cause problems!\n", sig); } }); /* i#2019: for a signal arriving in gencode before entry to a fragment, * we need to unlink the fragment just like for a signal arriving inside * the fragment itself. * Multiple signals should all have the same asynch_target so we should * only need a single info->interrupted. */ if (info->interrupted == NULL && !get_at_syscall(dcontext)) { f = find_next_fragment_from_gencode(dcontext, sc); if (f != NULL && !TEST(FRAG_COARSE_GRAIN, f->flags)) { if (unlink_fragment_for_signal(dcontext, f, FCACHE_ENTRY_PC(f))) { info->interrupted = f; info->interrupted_pc = FCACHE_ENTRY_PC(f); } } } } else if (get_at_syscall(dcontext) && pc == vsyscall_sysenter_return_pc - syslen && /* See i#2995 comment above: rule out sigreturn */ !is_sigreturn_syscall_number(sc->SC_SYSNUM_REG)) { LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from restart-vsyscall " PFX "\n", sig, pc); /* While the kernel points at int 0x80 for a restart, we leverage our * existing sysenter restart mechanism. 
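     * I.e., we leave xip at vsyscall_sysenter_return_pc with -EINTR in the
     * return register, and adjust_syscall_for_restart() later walks xip back
     * to the sysenter instruction itself.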
*/ at_auto_restart_syscall = true; sc->SC_XIP = (reg_t)vsyscall_sysenter_return_pc; sc->IF_X86_ELSE(SC_XAX, SC_R0) = -EINTR; pc = (byte *)sc->SC_XIP; } else if (pc == vsyscall_sysenter_return_pc) { LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from vsyscall " PFX "\n", sig, pc); /* i#1206: the syscall was interrupted but is not auto-restart, so we can go * back to dispatch and don't need to receive it now (which complicates * post-syscall handling) */ } else if (thread_synch_check_state(dcontext, THREAD_SYNCH_NO_LOCKS) && /* Avoid grabbing locks for xl8 while in a suspended state (i#3026). */ ksynch_get_value(&ostd->suspended) == 0) { /* The signal interrupted DR or the client but it's at a safe spot so * deliver it now. */ receive_now = true; } else { /* the signal interrupted DR itself => do not run handler now! */ LOG(THREAD, LOG_ASYNCH, 2, "record_pending_signal(%d) from DR at pc " PFX "\n", sig, pc); if (!forged && !can_always_delay[sig] && !is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) { /* i#195/PR 453964: don't re-execute if will just re-fault. * Our checks for dstack, etc. in master_signal_handler should * have accounted for everything */ ASSERT_NOT_REACHED(); abort_on_DR_fault(dcontext, pc, NULL, sig, frame, (sig == SIGSEGV) ? "SEGV" : "other", " unknown"); } } LOG(THREAD, LOG_ASYNCH, 3, "\taction is not SIG_IGN\n"); #if defined(X86) && defined(LINUX) LOG(THREAD, LOG_ASYNCH, 3, "\tretaddr = " PFX "\n", frame->pretcode); /* pretcode has same offs for plain */ #endif if (receive_now) { /* we need to translate sc before we know whether client wants to * suppress, so we need a backup copy */ bool xl8_success; ASSERT(!at_auto_restart_syscall); /* only used for delayed delivery */ sc_orig = *sc; ASSERT(!forged); /* cache the fragment since pclookup is expensive for coarse (i#658) */ f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper); xl8_success = translate_sigcontext(dcontext, ucxt, !can_always_delay[sig], f); if (can_always_delay[sig] && !xl8_success) { /* delay: we expect this for coarse fragments if alarm arrives * in middle of ind branch region or sthg (PR 213040) */ LOG(THREAD, LOG_ASYNCH, 2, "signal is in un-translatable spot in coarse fragment: delaying\n"); receive_now = false; } } if (receive_now) { /* N.B.: since we abandon the old context for synchronous signals, * we do not need to mark this fragment as FRAG_CANNOT_DELETE */ #ifdef DEBUG if (stats->loglevel >= 2 && (stats->logmask & LOG_ASYNCH) != 0 && safe_is_in_fcache(dcontext, pc, xsp)) { ASSERT(f != NULL); LOG(THREAD, LOG_ASYNCH, 2, "Got signal at pc " PFX " in this fragment:\n", pc); disassemble_fragment(dcontext, f, false); } #endif LOG(THREAD, LOG_ASYNCH, 2, "Going to receive signal now\n"); /* If we end up executing the default action, we'll go native * since we translated the context. If there's a handler, * we'll copy the context to the app stack and then adjust the * original on our stack so we take over. */ execute_handler_from_cache(dcontext, sig, frame, &sc_orig, f _IF_CLIENT(access_address)); } else if (!handled) { #ifdef CLIENT_INTERFACE /* i#182/PR 449996: must let client act on blocked non-delayable signals to * handle instrumentation faults. Make sure we're at a safe spot: i.e., * only raise for in-cache faults. Checking forged and no-delay * to avoid the in-cache check for delayable signals => safer. 
*/ if (blocked && !forged && !can_always_delay[sig] && safe_is_in_fcache(dcontext, pc, xsp)) { dr_signal_action_t action; /* cache the fragment since pclookup is expensive for coarse (i#658) */ f = fragment_pclookup(dcontext, (cache_pc)sc->SC_XIP, &wrapper); sc_orig = *sc; translate_sigcontext(dcontext, ucxt, true /*shouldn't fail*/, f); /* make a copy before send_signal_to_client() tweaks it */ sigcontext_t sc_interrupted = *sc; action = send_signal_to_client(dcontext, sig, frame, &sc_orig, access_address, true /*blocked*/, f); /* For blocked signal early event we disallow BYPASS (xref i#182/PR 449996) */ CLIENT_ASSERT(action != DR_SIGNAL_BYPASS, "cannot bypass a blocked signal event"); if (!handle_client_action_from_cache(dcontext, sig, action, frame, &sc_orig, &sc_interrupted, true /*blocked*/)) { ostd->processing_signal--; return; } /* restore original (untranslated) sc */ *get_sigcontext_from_rt_frame(frame) = sc_orig; } #endif /* i#196/PR 453847: avoid infinite loop of signals if try to re-execute */ if (blocked && !can_always_delay[sig] && !is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) { ASSERT(default_action[sig] == DEFAULT_TERMINATE || default_action[sig] == DEFAULT_TERMINATE_CORE); LOG(THREAD, LOG_ASYNCH, 1, "blocked fatal signal %d cannot be delayed: terminating\n", sig); sc_orig = *sc; /* If forged we're likely couldbelinking, and we don't need to xl8. */ if (forged) ASSERT(is_couldbelinking(dcontext)); else translate_sigcontext(dcontext, ucxt, true /*shouldn't fail*/, NULL); /* the process should be terminated */ execute_default_from_cache(dcontext, sig, frame, &sc_orig, forged); ASSERT_NOT_REACHED(); } /* Happened in DR, do not translate context. Record for later processing * at a safe point with a clean app state. */ if (!blocked || sig >= OFFS_RT || (blocked && info->sigpending[sig] == NULL)) { /* only have 1 pending for blocked non-rt signals */ /* to avoid accumulating signals if we're slow in presence of * a high-rate itimer we only keep 2 alarm signals (PR 596768) */ if (sig_is_alarm_signal(sig)) { if (info->sigpending[sig] != NULL && info->sigpending[sig]->next != NULL) { ASSERT(info->sigpending[sig]->next->next == NULL); /* keep the oldest, replace newer w/ brand-new one, for * more spread-out alarms */ sigpending_t *temp = info->sigpending[sig]; info->sigpending[sig] = temp->next; special_heap_free(info->sigheap, temp); info->num_pending--; LOG(THREAD, LOG_ASYNCH, 2, "3rd pending alarm %d => dropping 2nd\n", sig); STATS_INC(num_signals_dropped); SYSLOG_INTERNAL_WARNING_ONCE("dropping 3rd pending alarm signal"); } } /* special heap alloc always uses sizeof(sigpending_t) blocks */ pend = special_heap_alloc(info->sigheap); ASSERT(sig > 0 && sig <= MAX_SIGNUM); info->num_pending++; if (info->num_pending > DYNAMO_OPTION(max_pending_signals) && !info->multiple_pending_units) info->multiple_pending_units = true; if (info->num_pending >= DYNAMO_OPTION(max_pending_signals)) { /* We're at the limit of our special heap: one more and it will try to * allocate a new unit, which is unsafe as it acquires locks. We take * several steps: we notify the user; we check for this on delivery as * well and proactively allocate a new unit in a safer context. * XXX: Perhaps we should drop some signals here? 
*/ DO_ONCE({ char max_string[32]; snprintf(max_string, BUFFER_SIZE_ELEMENTS(max_string), "%d", DYNAMO_OPTION(max_pending_signals)); NULL_TERMINATE_BUFFER(max_string); SYSLOG(SYSLOG_WARNING, MAX_PENDING_SIGNALS, 3, get_application_name(), get_application_pid(), max_string); }); } pend->next = info->sigpending[sig]; info->sigpending[sig] = pend; pend->unblocked = !blocked; /* FIXME: note that for asynchronous signals we don't need to * bother to record exact machine context, even entire frame, * since don't want to pass dynamo pc context to app handler. * only copy frame for synchronous signals? those only * happen while in cache? but for asynch, we would have to * construct our own frame...kind of a pain. */ copy_frame_to_pending(dcontext, sig, frame _IF_CLIENT(access_address)); /* i#1145: check whether we should auto-restart an interrupted syscall */ if (at_auto_restart_syscall) { /* Adjust the pending frame to restart the syscall, if applicable */ sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame); sigcontext_t *sc_pend = get_sigcontext_from_rt_frame(frame); if (adjust_syscall_for_restart(dcontext, info, sig, sc_pend, f, orig_retval_reg)) { /* We're going to re-start this syscall after we go * back to dispatch, run the post-syscall handler (for -EINTR), * and deliver the signal. We've adjusted the sigcontext * for re-start on the sigreturn, but we need to tell * execute_handler_from_dispatch() to use our sigcontext * and not the mcontext. * A client will see a second set of pre + post handlers for * the restart, which seems reasonable, given the signal in * between. */ info->sigpending[sig]->use_sigcontext = true; } } } else { /* For clients, we document that we do not pass to them * unless we're prepared to deliver to app. We would have * to change our model to pass them non-final-translated * contexts for delayable signals in order to give them * signals as soon as they come in. Xref i#182/PR 449996. */ LOG(THREAD, LOG_ASYNCH, 3, "\tnon-rt signal already in queue, ignoring this one!\n"); } if (!blocked && !dcontext->signals_pending) dcontext->signals_pending = 1; } ostd->processing_signal--; } /* Distinguish SYS_kill-generated from instruction-generated signals. * If sent from another process we can't tell, but if sent from this * thread the interruption point should be our own post-syscall. * FIXME PR 368277: for other threads in same process we should set a flag * and identify them as well. * FIXME: for faults like SIGILL we could examine the interrupted pc * to see whether it is capable of generating such a fault (see code * used in handle_nudge_signal()). */ static bool is_sys_kill(dcontext_t *dcontext, byte *pc, byte *xsp, kernel_siginfo_t *info) { #if !defined(VMX86_SERVER) && !defined(MACOS) /* does not use SI_KERNEL */ /* i#133: use si_code to distinguish user-sent signals. * Even 2.2 Linux kernel supports <=0 meaning user-sent (except * SIGIO) so we assume we can rely on it. 
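     * E.g., kill() yields si_code SI_USER (0), while a genuine fault carries
     * a positive code such as SEGV_MAPERR.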
*/ if (info->si_code <= 0) return true; #endif return (is_at_do_syscall(dcontext, pc, xsp) && (dcontext->sys_num == SYS_kill || #ifdef LINUX dcontext->sys_num == SYS_tkill || dcontext->sys_num == SYS_tgkill || dcontext->sys_num == SYS_rt_sigqueueinfo #elif defined(MACOS) dcontext->sys_num == SYS___pthread_kill #endif )); } static byte * compute_memory_target(dcontext_t *dcontext, cache_pc instr_cache_pc, kernel_ucontext_t *uc, kernel_siginfo_t *si, bool *write) { sigcontext_t *sc = SIGCXT_FROM_UCXT(uc); byte *target = NULL; instr_t instr; priv_mcontext_t mc; uint memopidx, memoppos, memopsize; opnd_t memop; bool found_target = false; bool in_maps; bool use_allmem = false; uint prot; IF_ARM(dr_isa_mode_t old_mode;) LOG(THREAD, LOG_ALL, 2, "computing memory target for " PFX " causing SIGSEGV, kernel claims it is " PFX "\n", instr_cache_pc, (byte *)si->si_addr); /* ARM's sigcontext_t has a "fault_address" field but it also seems unreliable */ IF_ARM(LOG(THREAD, LOG_ALL, 2, "fault_address: " PFX "\n", sc->fault_address)); /* We used to do a memory query to check if instr_cache_pc is readable, but * now we use TRY/EXCEPT because we don't have the instr length and the OS * query is expensive. If decoding faults, the signal handler will longjmp * out before it calls us recursively. */ instr_init(dcontext, &instr); IF_ARM({ /* Be sure to use the interrupted mode and not the last-dispatch mode */ dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), &old_mode); }); TRY_EXCEPT(dcontext, { decode(dcontext, instr_cache_pc, &instr); }, { return NULL; /* instr_cache_pc was unreadable */ }); IF_ARM(dr_set_isa_mode(dcontext, old_mode, NULL)); if (!instr_valid(&instr)) { LOG(THREAD, LOG_ALL, 2, "WARNING: got SIGSEGV for invalid instr at cache pc " PFX "\n", instr_cache_pc); ASSERT_NOT_REACHED(); instr_free(dcontext, &instr); return NULL; } ucontext_to_mcontext(&mc, uc); ASSERT(write != NULL); /* i#1009: If si_addr is plausibly one of the memory operands of the * faulting instruction, assume the target was si_addr. If none of the * memops match, fall back to checking page protections, which can be racy. * For si_addr == NULL, we fall back to the protection check because it's * too likely to be a valid memop and we can live with a race on a page that * is typically unmapped. */ if (si->si_code == SEGV_ACCERR && si->si_addr != NULL) { for (memopidx = 0; instr_compute_address_ex_priv(&instr, &mc, memopidx, &target, write, &memoppos); memopidx++) { /* i#1045: check whether operand and si_addr overlap */ memop = *write ? instr_get_dst(&instr, memoppos) : instr_get_src(&instr, memoppos); memopsize = opnd_size_in_bytes(opnd_get_size(memop)); LOG(THREAD, LOG_ALL, 2, "memory operand %u has address " PFX " and size %u\n", memopidx, target, memopsize); if ((byte *)si->si_addr >= target && (byte *)si->si_addr < target + memopsize) { target = (byte *)si->si_addr; found_target = true; break; } } } /* For fcache faults, use all_memory_areas, which is faster but acquires * locks. If it's possible we're in DR, go to the OS to avoid deadlock. 
*/ if (DYNAMO_OPTION(use_all_memory_areas)) { use_allmem = safe_is_in_fcache(dcontext, instr_cache_pc, (byte *)sc->SC_XSP); } if (!found_target) { if (si->si_addr != NULL) { LOG(THREAD, LOG_ALL, 3, "%s: falling back to racy protection checks\n", __FUNCTION__); } /* i#115/PR 394984: consider all memops */ for (memopidx = 0; instr_compute_address_ex_priv(&instr, &mc, memopidx, &target, write, NULL); memopidx++) { if (use_allmem) { in_maps = get_memory_info(target, NULL, NULL, &prot); } else { in_maps = get_memory_info_from_os(target, NULL, NULL, &prot); } if ((!in_maps || !TEST(MEMPROT_READ, prot)) || (*write && !TEST(MEMPROT_WRITE, prot))) { found_target = true; break; } } } if (!found_target) { /* probably an NX fault: how tell whether kernel enforcing? */ in_maps = get_memory_info_from_os(instr_cache_pc, NULL, NULL, &prot); if (!in_maps || !TEST(MEMPROT_EXEC, prot)) { target = instr_cache_pc; found_target = true; } } /* we may still not find target, e.g. for SYS_kill(SIGSEGV) */ if (!found_target) target = NULL; DOLOG(2, LOG_ALL, { LOG(THREAD, LOG_ALL, 2, "For SIGSEGV at cache pc " PFX ", computed target %s " PFX "\n", instr_cache_pc, *write ? "write" : "read", target); loginst(dcontext, 2, &instr, "\tfaulting instr"); }); instr_free(dcontext, &instr); return target; } /* If native_state is true, assumes the fault is not in the cache and thus * does not need translation but rather should always be re-executed. */ static bool check_for_modified_code(dcontext_t *dcontext, cache_pc instr_cache_pc, kernel_ucontext_t *uc, byte *target, bool native_state) { /* special case: we expect a seg fault for executable regions * that were writable and marked read-only by us. * have to figure out the target address! * unfortunately the OS doesn't tell us, nor whether it's a write. * FIXME: if sent from SYS_kill(SIGSEGV), the pc will be post-syscall, * and if that post-syscall instr is a write that could have faulted, * how can we tell the difference? */ if (was_executable_area_writable(target)) { /* translate instr_cache_pc to original app pc * DO NOT use translate_sigcontext, don't want to change the * signal frame or else we'll lose control when we try to * return to signal pc! */ app_pc next_pc, translated_pc = NULL; fragment_t *f = NULL; fragment_t wrapper; ASSERT((cache_pc)SIGCXT_FROM_UCXT(uc)->SC_XIP == instr_cache_pc); if (!native_state) { /* For safe recreation we need to either be couldbelinking or hold * the initexit lock (to keep someone from flushing current * fragment), the initexit lock is easier */ mutex_lock(&thread_initexit_lock); /* cache the fragment since pclookup is expensive for coarse units (i#658) */ f = fragment_pclookup(dcontext, instr_cache_pc, &wrapper); translated_pc = recreate_app_pc(dcontext, instr_cache_pc, f); ASSERT(translated_pc != NULL); mutex_unlock(&thread_initexit_lock); } next_pc = handle_modified_code(dcontext, instr_cache_pc, translated_pc, target, f); if (!native_state) { /* going to exit from middle of fragment (at the write) so will mess up * trace building */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } } if (next_pc == NULL) { /* re-execute the write -- just have master_signal_handler return */ return true; } else { ASSERT(!native_state); /* Do not resume execution in cache, go back to dispatch. 
*/ transfer_from_sig_handler_to_fcache_return( dcontext, uc, NULL, SIGSEGV, next_pc, (linkstub_t *)get_selfmod_linkstub(), false); /* now have master_signal_handler return */ return true; } } return false; } #ifndef HAVE_SIGALTSTACK /* The exact layout of this struct is relied on in master_signal_handler() * in x86.asm. */ struct clone_and_swap_args { byte *stack; byte *tos; }; /* Helper function for swapping handler to dstack */ bool sig_should_swap_stack(struct clone_and_swap_args *args, kernel_ucontext_t *ucxt) { byte *cur_esp; dcontext_t *dcontext = get_thread_private_dcontext(); if (dcontext == NULL) return false; GET_STACK_PTR(cur_esp); if (!is_on_dstack(dcontext, cur_esp)) { sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); /* Pass back the proper args to clone_and_swap_stack: we want to * copy to dstack from the tos at the signal interruption point. */ args->stack = dcontext->dstack; /* leave room for fpstate */ args->stack -= signal_frame_extra_size(true); args->stack = (byte *)ALIGN_BACKWARD(args->stack, XSTATE_ALIGNMENT); args->tos = (byte *)sc->SC_XSP; return true; } else return false; } #endif /* Helper that takes over the current thread signaled via SUSPEND_SIGNAL. Kept * separate mostly to keep the priv_mcontext_t allocation out of * master_signal_handler_C. * If it returns, it returns false, and the signal should be squashed. */ static bool sig_take_over(kernel_ucontext_t *uc) { priv_mcontext_t mc; ucontext_to_mcontext(&mc, uc); /* We don't want our own blocked signals: we want the app's, stored in the frame. */ if (!os_thread_take_over(&mc, SIGMASK_FROM_UCXT(uc))) return false; ASSERT_NOT_REACHED(); /* shouldn't return */ return true; /* make compiler happy */ } static bool is_safe_read_ucxt(kernel_ucontext_t *ucxt) { app_pc pc = (app_pc)SIGCXT_FROM_UCXT(ucxt)->SC_XIP; return is_safe_read_pc(pc); } /* the master signal handler * WARNING: behavior varies with different versions of the kernel! * sigaction support was only added with 2.2 */ #ifndef X86_32 /* stub in x86.asm passes our xsp to us */ # ifdef MACOS void master_signal_handler_C(handler_t handler, int style, int sig, kernel_siginfo_t *info, kernel_ucontext_t *ucxt, byte *xsp) # else void master_signal_handler_C(int sig, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt, byte *xsp) # endif #else /* On ia32, adding a parameter disturbs the frame we're trying to capture, so we * add an intermediate frame and read the normal params off the stack directly. */ void master_signal_handler_C(byte *xsp) #endif { sigframe_rt_t *frame = (sigframe_rt_t *)xsp; #ifdef X86_32 /* Read the normal arguments from the frame. */ int sig = frame->sig; kernel_siginfo_t *siginfo = frame->pinfo; kernel_ucontext_t *ucxt = frame->puc; #endif /* !X64 */ sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); thread_record_t *tr; #ifdef DEBUG uint level = 2; # if !defined(HAVE_MEMINFO) /* avoid logging every single TRY probe fault */ if (!dynamo_initialized) level = 5; # endif #endif bool local; #if defined(MACOS) && !defined(X64) /* The kernel clears fs, so we have to re-instate our selector, if * it was set in the first place. */ if (sc->__ss.__fs != 0) tls_reinstate_selector(sc->__ss.__fs); #endif #ifdef X86 /* i#2089: For is_thread_tls_initialized() we need a safe_read path that does not * do any logging or call get_thread_private_dcontext() as those will recurse. * This path is global so there's no SELF_PROTECT_LOCAL and we also bypass * the ENTERING_DR() for this short path. 
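     * The recovery below is pure PC matching: the faulting PC identifies which
     * safe-read stub faulted, and we rewrite the context to that stub's recovery
     * point with a 0 return value before returning straight from the handler.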
 */
    if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_magic) {
        sc->SC_RETURN_REG = 0;
        sc->SC_XIP = (reg_t)safe_read_tls_magic_recover;
        return;
    } else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_self) {
        sc->SC_RETURN_REG = 0;
        sc->SC_XIP = (reg_t)safe_read_tls_self_recover;
        return;
    } else if (sig == SIGSEGV && sc->SC_XIP == (ptr_uint_t)safe_read_tls_app_self) {
        sc->SC_RETURN_REG = 0;
        sc->SC_XIP = (reg_t)safe_read_tls_app_self_recover;
        return;
    }
#endif
    dcontext_t *dcontext = get_thread_private_dcontext();
#ifdef MACOS
# ifdef X64
    ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX64)) ||
           (!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT64)));
# else
    ASSERT((YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT_AVX32)) ||
           (!YMM_ENABLED() && ucxt->uc_mcsize == sizeof(_STRUCT_MCONTEXT)));
# endif
#endif
    /* i#350: To support safe_read or TRY_EXCEPT without a dcontext, use the
     * global dcontext when handling safe_read faults. This lets us pass the
     * check for a dcontext below and causes us to use the global log.
     */
    if (dcontext == NULL && (sig == SIGSEGV || sig == SIGBUS) &&
        (is_safe_read_ucxt(ucxt) ||
         (!dynamo_initialized && global_try_except.try_except_state != NULL))) {
        dcontext = GLOBAL_DCONTEXT;
    }
    if (dynamo_exited && get_num_threads() > 1 && sig == SIGSEGV) {
        /* PR 470957: this is almost certainly a race so just squelch it.
         * We live w/ the risk that it was holding a lock our release-build
         * exit code needs.
         */
        exit_thread_syscall(1);
    }
    /* FIXME: ensure the path for recording a pending signal does not grab any
     * DR locks that could have been interrupted,
     * e.g., synchronize_dynamic_options grabs the stats_lock!
     */
    if (sig == SUSPEND_SIGNAL) {
        if (proc_get_vendor() == VENDOR_AMD) {
            /* i#3356: Work around an AMD processor bug where it does not clear the
             * hidden gs base when the gs selector is written. Pre-4.7 Linux kernels
             * leave the prior thread's base in place on a switch due to this.
             * We can thus come here and get the wrong dcontext on attach; worse,
             * we can get NULL here but the wrong one later during init. It's
             * safest to just set a non-zero value (the kernel ignores zero) for all
             * unknown threads here. There are no problems for non-attach takeover.
             */
            if (dcontext == NULL || dcontext->owning_thread != get_sys_thread_id()) {
                /* tls_thread_preinit() further rules out a temp-native dcontext
                 * and avoids clobbering it, to preserve the thread_lookup() case
                 * below (which we do not want to run first as we could swap to
                 * the incorrect dcontext midway through it).
                 */
                if (!tls_thread_preinit()) {
                    SYSLOG_INTERNAL_ERROR_ONCE("ERROR: Failed to work around AMD "
                                               "context switch bug #3356: crashes or "
                                               "hangs may ensue...");
                }
                dcontext = NULL;
            }
        }
    }
    if (dcontext == NULL &&
        /* Check for a temporarily-native thread we're synch-ing with. */
        (sig == SUSPEND_SIGNAL
#ifdef X86
         || (INTERNAL_OPTION(safe_read_tls_init) &&
             /* Check for whether this is a thread with its invalid sentinel magic set.
              * In this case, we assume that it is either a thread that is currently
              * temporarily-native via API like DR_EMIT_GO_NATIVE, or a thread in the
              * clone window. We know by inspection of our own code that it is safe to
              * call thread_lookup in either case: the thread is making a clone or was
              * just cloned. I.e., thread_lookup requires a lock that must not be held
              * by the calling thread (i#2921).
              * XXX: what is ARM doing, any special case w/ dcontext == NULL?
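              * (As we understand it, TLS_MAGIC_INVALID is the sentinel read back
              * when the magic TLS slot does not hold our initialized-TLS value,
              * letting us spot such threads without a dcontext.)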
*/ safe_read_tls_magic() == TLS_MAGIC_INVALID) #endif )) { tr = thread_lookup(get_sys_thread_id()); if (tr != NULL) dcontext = tr->dcontext; } if (dcontext == NULL || (dcontext != GLOBAL_DCONTEXT && (dcontext->signal_field == NULL || !((thread_sig_info_t *)dcontext->signal_field)->fully_initialized))) { /* FIXME: || !intercept_asynch, or maybe !under_our_control */ /* FIXME i#26: this could be a signal arbitrarily sent to this thread. * We could try to route it to another thread, using a global queue * of pending signals. But what if it was targeted to this thread * via SYS_{tgkill,tkill}? Can we tell the difference, even if * we watch the kill syscalls: could come from another process? */ if (sig_is_alarm_signal(sig)) { /* assuming an alarm during thread exit or init (xref PR 596127, * i#359): suppressing is fine */ } else if (sig == SUSPEND_SIGNAL && dcontext == NULL) { /* We sent SUSPEND_SIGNAL to a thread we don't control (no * dcontext), which means we want to take over. */ ASSERT(!doing_detach); if (!sig_take_over(ucxt)) return; ASSERT_NOT_REACHED(); /* else, shouldn't return */ } else { /* Using global dcontext because dcontext is NULL here. */ DOLOG(1, LOG_ASYNCH, { dump_sigcontext(GLOBAL_DCONTEXT, sc); }); SYSLOG_INTERNAL_ERROR("ERROR: master_signal_handler with no siginfo " "(i#26?): tid=%d, sig=%d", get_sys_thread_id(), sig); } /* see FIXME comments above. * workaround for now: suppressing is better than dying. */ if (can_always_delay[sig]) return; REPORT_FATAL_ERROR_AND_EXIT(FAILED_TO_HANDLE_SIGNAL, 2, get_application_name(), get_application_pid()); } /* we may be entering dynamo from code cache! */ /* Note that this is unsafe if -single_thread_in_DR => we grab a lock => * hang if signal interrupts DR: but we don't really support that option */ ENTERING_DR(); if (dcontext == GLOBAL_DCONTEXT) { local = false; tr = thread_lookup(get_sys_thread_id()); } else { tr = dcontext->thread_record; local = local_heap_protected(dcontext); if (local) SELF_PROTECT_LOCAL(dcontext, WRITABLE); } /* i#1921: For proper native execution with re-takeover we need to propagate * signals to app handlers while native. For now we do not support re-takeover * and we give up our handlers via signal_remove_handlers(). */ ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext) || sig == SUSPEND_SIGNAL); LOG(THREAD, LOG_ASYNCH, level, "\nmaster_signal_handler: thread=%d, sig=%d, xsp=" PFX ", retaddr=" PFX "\n", get_sys_thread_id(), sig, xsp, *((byte **)xsp)); LOG(THREAD, LOG_ASYNCH, level + 1, "siginfo: sig = %d, pid = %d, status = %d, errno = %d, si_code = %d\n", siginfo->si_signo, siginfo->si_pid, siginfo->si_status, siginfo->si_errno, siginfo->si_code); DOLOG(level + 1, LOG_ASYNCH, { dump_sigcontext(dcontext, sc); }); #if defined(X86_32) && !defined(VMX86_SERVER) && defined(LINUX) /* FIXME case 6700: 2.6.9 (FC3) kernel sets up our frame with a pretcode * of 0x440. This happens if our restorer is unspecified (though 2.6.9 * src code shows setting the restorer to a default value in that case...) * or if we explicitly point at dynamorio_sigreturn. I couldn't figure * out why it kept putting 0x440 there. So we fix the issue w/ this * hardcoded return. * This hack causes vmkernel to kill the process on sigreturn due to * vmkernel's non-standard sigreturn semantics. PR 404712. */ *((byte **)xsp) = (byte *)dynamorio_sigreturn; #endif /* N.B.: * ucontext_t is defined in two different places. 
The one we get * included is /usr/include/sys/ucontext.h, which would have us * doing this: * void *pc = (void *) ucxt->uc_mcontext.gregs[EIP]; * However, EIP is not defined for us (used to be in older * RedHat version) unless we define __USE_GNU, which we don't want to do * for other reasons, so we'd have to also say: * #define EIP 14 * Instead we go by the ucontext_t definition in * /usr/include/asm/ucontext.h, which has it containing a sigcontext struct, * defined in /usr/include/asm/sigcontext.h. This is the definition used * by the kernel. The two definitions are field-for-field * identical except that the sys one has an fpstate struct at the end -- * but the next field in the frame is an fpstate. The only mystery * is why the rt frame is declared as ucontext instead of sigcontext. * The kernel's version of ucontext must be the asm one! * And the sys one grabs the next field of the frame. * Also note that mcontext_t.fpregs == sigcontext.fpstate is NULL if * floating point operations have not been used (lazy fp state saving). * Also, sigset_t has different sizes according to kernel (8 bytes) vs. * glibc (128 bytes?). */ switch (sig) { case SIGBUS: /* PR 313665: look for DR crashes on unaligned memory or mmap bounds */ case SIGSEGV: { /* Older kernels do NOT fill out the signal-specific fields of siginfo, * except for SIGCHLD. Thus we cannot do this: * void *pc = (void*) siginfo->si_addr; * Thus we must use the third argument, which is a ucontext_t (see above) */ void *pc = (void *)sc->SC_XIP; bool syscall_signal = false; /* signal came from syscall? */ bool is_write = false; byte *target; bool is_DR_exception = false; #ifdef SIDELINE if (dcontext == NULL) { SYSLOG_INTERNAL_ERROR("seg fault in sideline thread -- NULL dcontext!"); ASSERT_NOT_REACHED(); } #endif if (is_safe_read_ucxt(ucxt) || (!dynamo_initialized && global_try_except.try_except_state != NULL) || dcontext->try_except.try_except_state != NULL) { /* handle our own TRY/EXCEPT */ try_except_context_t *try_cxt; #ifdef HAVE_MEMINFO /* our probe produces many of these every run */ /* since we use for safe_*, making a _ONCE */ SYSLOG_INTERNAL_WARNING_ONCE("(1+x) Handling our fault in a TRY at " PFX, pc); #endif LOG(THREAD, LOG_ALL, level, "TRY fault at " PFX "\n", pc); if (TEST(DUMPCORE_TRY_EXCEPT, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("try/except fault"); if (is_safe_read_ucxt(ucxt)) { sc->SC_XIP = (reg_t)safe_read_resume_pc(); /* Break out to log the normal return from the signal handler. */ break; } try_cxt = (dcontext != NULL) ? dcontext->try_except.try_except_state : global_try_except.try_except_state; ASSERT(try_cxt != NULL); /* The exception interception code did an ENTER so we must EXIT here */ EXITING_DR(); /* Since we have no sigreturn we have to restore the mask * manually, just like siglongjmp(). i#226/PR 492568: we rely * on the kernel storing the prior mask in ucxt, so we do not * need to store it on every setjmp. */ /* Verify that there's no scenario where the mask gets changed prior * to a fault inside a try. This relies on dr_setjmp_sigmask() filling * in the mask, which we only bother to do in debug build. 
*/ ASSERT(memcmp(&try_cxt->context.sigmask, &ucxt->uc_sigmask, sizeof(ucxt->uc_sigmask)) == 0); sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), NULL, sizeof(ucxt->uc_sigmask)); DR_LONGJMP(&try_cxt->context, LONGJMP_EXCEPTION); ASSERT_NOT_REACHED(); } target = compute_memory_target(dcontext, pc, ucxt, siginfo, &is_write); #ifdef CLIENT_INTERFACE if (CLIENTS_EXIST() && is_in_client_lib(pc)) { /* i#1354: client might write to a page we made read-only. * If so, handle the fault and re-execute it, if it's safe to do so * (we document these criteria under DR_MEMPROT_PRETEND_WRITE). */ if (is_write && !is_couldbelinking(dcontext) && OWN_NO_LOCKS(dcontext) && check_for_modified_code(dcontext, pc, ucxt, target, true /*native*/)) break; abort_on_fault(dcontext, DUMPCORE_CLIENT_EXCEPTION, pc, target, sig, frame, exception_label_client, (sig == SIGSEGV) ? "SEGV" : "BUS", " client library"); ASSERT_NOT_REACHED(); } #endif /* For !HAVE_MEMINFO, we cannot compute the target until * after the try/except check b/c compute_memory_target() * calls get_memory_info_from_os() which does a probe: and the * try/except could be from a probe itself. A try/except that * triggers a stack overflow should recover on the longjmp, so * this order should be fine. */ /* FIXME: share code with Windows callback.c */ /* FIXME PR 205795: in_fcache and is_dynamo_address do grab locks! */ if ((is_on_dstack(dcontext, (byte *)sc->SC_XSP) /* PR 302951: clean call arg processing => pass to app/client. * Rather than call the risky in_fcache we check whereami. */ IF_CLIENT_INTERFACE(&&(dcontext->whereami != DR_WHERE_FCACHE))) || is_on_alt_stack(dcontext, (byte *)sc->SC_XSP) || is_on_initstack((byte *)sc->SC_XSP)) { /* Checks here need to cover everything that record_pending_signal() * thinks is non-fcache, non-gencode: else that routine will kill * process since can't delay or re-execute (i#195/PR 453964). */ is_DR_exception = true; } else if (!safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP) && (in_generated_routine(dcontext, pc) || is_at_do_syscall(dcontext, pc, (byte *)sc->SC_XSP) || is_dynamo_address(pc))) { #ifdef CLIENT_INTERFACE if (!in_generated_routine(dcontext, pc) && !is_at_do_syscall(dcontext, pc, (byte *)sc->SC_XSP)) { /* PR 451074: client needs a chance to handle exceptions in its * own gencode. client_exception_event() won't return if client * wants to re-execute faulting instr. */ sigcontext_t sc_interrupted = *get_sigcontext_from_rt_frame(frame); dr_signal_action_t action = send_signal_to_client( dcontext, sig, frame, sc, target, false /*!blocked*/, NULL); if (action != DR_SIGNAL_DELIVER && /* for delivery, continue below */ !handle_client_action_from_cache(dcontext, sig, action, frame, sc, &sc_interrupted, false /*!blocked*/)) { /* client handled fault */ break; } } #endif is_DR_exception = true; } if (is_DR_exception) { /* kill(getpid(), SIGSEGV) looks just like a SIGSEGV in the store of eax * to mcontext after the syscall instr in do_syscall -- try to distinguish: */ if (is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, siginfo)) { LOG(THREAD, LOG_ALL, 2, "assuming SIGSEGV at post-do-syscall is kill, not our write fault\n"); syscall_signal = true; } if (!syscall_signal) { if (check_in_last_thread_vm_area(dcontext, target)) { /* See comments in callback.c as well. 
* FIXME: try to share code */ SYSLOG_INTERNAL_WARNING("(decode) exception in last area, " "DR pc=" PFX ", app pc=" PFX, pc, target); STATS_INC(num_exceptions_decode); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 2, "intercept_exception: " "squashing old trace\n"); trace_abort(dcontext); } /* we do get faults when not building a bb: e.g., * ret_after_call_check does decoding (case 9396) */ if (dcontext->bb_build_info != NULL) { /* must have been building a bb at the time */ bb_build_abort(dcontext, true /*clean vm area*/, true /*unlock*/); } /* Since we have no sigreturn we have to restore the mask manually */ unblock_all_signals(NULL); /* Let's pass it back to the application - memory is unreadable */ if (TEST(DUMPCORE_FORGE_UNREAD_EXEC, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Racy app execution (decode unreadable)"); os_forge_exception(target, UNREADABLE_MEMORY_EXECUTION_EXCEPTION); ASSERT_NOT_REACHED(); } else { abort_on_DR_fault(dcontext, pc, target, sig, frame, (sig == SIGSEGV) ? "SEGV" : "BUS", in_generated_routine(dcontext, pc) ? " generated" : ""); } } } /* if get here, pass the signal to the app */ ASSERT(pc != 0); /* shouldn't get here */ if (sig == SIGSEGV && !syscall_signal /*only for in-cache signals*/) { /* special case: we expect a seg fault for executable regions * that were writable and marked read-only by us. */ if (is_write && check_for_modified_code(dcontext, pc, ucxt, target, false /*!native*/)) { /* it was our signal, so don't pass to app -- return now */ break; } } /* pass it to the application (or client) */ LOG(THREAD, LOG_ALL, 1, "** Received SIG%s at cache pc " PFX " in thread " TIDFMT "\n", (sig == SIGSEGV) ? "SEGV" : "BUS", pc, get_thread_id()); ASSERT(syscall_signal || safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP)); /* we do not call trace_abort() here since we may need to * translate from a temp private bb (i#376): but all paths * that deliver the signal or redirect will call it */ record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(target)); break; } /* PR 212090: the signal we use to suspend threads */ case SUSPEND_SIGNAL: if (handle_suspend_signal(dcontext, ucxt, frame)) { /* i#1921: see comment above */ ASSERT(tr == NULL || tr->under_dynamo_control || IS_CLIENT_THREAD(dcontext)); record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL)); } /* else, don't deliver to app */ break; /* i#61/PR 211530: the signal we use for nudges */ case NUDGESIG_SIGNUM: if (handle_nudge_signal(dcontext, siginfo, ucxt)) record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL)); /* else, don't deliver to app */ break; case SIGALRM: case SIGVTALRM: case SIGPROF: if (handle_alarm(dcontext, sig, ucxt)) record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL)); /* else, don't deliver to app */ break; #ifdef SIDELINE case SIGCHLD: { int status = siginfo->si_status; if (siginfo->si_pid == 0) { /* FIXME: with older versions of linux the sigchld fields of * siginfo are not filled in properly! * This is my attempt to handle that, pid seems to be 0 */ break; } if (status != 0) { LOG(THREAD, LOG_ALL, 0, "*** Child thread died with error %d\n", status); ASSERT_NOT_REACHED(); } break; } #endif default: { record_pending_signal(dcontext, sig, ucxt, frame, false _IF_CLIENT(NULL)); break; } } /* end switch */ LOG(THREAD, LOG_ASYNCH, level, "\tmaster_signal_handler %d returning now to " PFX "\n\n", sig, sc->SC_XIP); /* Ensure we didn't get the app's sigstack into our frame. 
On Mac, the kernel * doesn't use the frame's uc_stack, so we limit this to Linux. */ IF_LINUX(ASSERT(dcontext == NULL || dcontext == GLOBAL_DCONTEXT || frame->uc.uc_stack.ss_sp == ((thread_sig_info_t *)dcontext->signal_field)->sigstack.ss_sp)); /* restore protections */ if (local) SELF_PROTECT_LOCAL(dcontext, READONLY); EXITING_DR(); } static bool execute_handler_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *our_frame, sigcontext_t *sc_orig, fragment_t *f _IF_CLIENT(byte *access_address)) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; /* we want to modify the sc in DR's frame */ kernel_ucontext_t *uc = get_ucontext_from_rt_frame(our_frame); sigcontext_t *sc = SIGCXT_FROM_UCXT(uc); kernel_sigset_t blocked; /* Need to get xsp now before get new dcontext. * This is the translated xsp, so we avoid PR 306410 (cleancall arg fault * on dstack => handler run on dstack) that Windows hit. */ byte *xsp = get_sigstack_frame_ptr(dcontext, sig, our_frame/* take xsp from (translated) * interruption point */); #ifdef CLIENT_INTERFACE sigcontext_t sc_interrupted = *sc; dr_signal_action_t action = send_signal_to_client( dcontext, sig, our_frame, sc_orig, access_address, false /*not blocked*/, f); if (!handle_client_action_from_cache(dcontext, sig, action, our_frame, sc_orig, &sc_interrupted, false /*!blocked*/)) return false; #else if (info->app_sigaction[sig] == NULL || info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) { LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n"); if (execute_default_from_cache(dcontext, sig, our_frame, sc_orig, false)) { /* if we haven't terminated, restore original (untranslated) sc * on request. * XXX i#1615: this doesn't restore SIMD regs, if client translated them! */ *get_sigcontext_from_rt_frame(our_frame) = *sc_orig; } return false; } ASSERT(info->app_sigaction[sig] != NULL && info->app_sigaction[sig]->handler != (handler_t)SIG_IGN && info->app_sigaction[sig]->handler != (handler_t)SIG_DFL); #endif LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_cache for signal %d\n", sig); RSTATS_INC(num_signals); /* now that we know it's not a client-involved fault, dump as app fault */ report_app_problem(dcontext, APPFAULT_FAULT, (byte *)sc->SC_XIP, (byte *)sc->SC_FP, "\nSignal %d delivered to application handler.\n", sig); LOG(THREAD, LOG_ASYNCH, 3, "\txsp is " PFX "\n", xsp); /* copy frame to appropriate stack and convert to non-rt if necessary */ copy_frame_to_stack(dcontext, sig, our_frame, (void *)xsp, false /*!pending*/); LOG(THREAD, LOG_ASYNCH, 3, "\tcopied frame from " PFX " to " PFX "\n", our_frame, xsp); sigcontext_t *app_sc = get_sigcontext_from_app_frame(info, sig, (void *)xsp); /* Because of difficulties determining when/if a signal handler * returns, we do what the kernel does: abandon all of our current * state, copy what we might need to the handler frame if we come back, * and then it's ok if the handler doesn't return. * If it does, we start interpreting afresh when we see sigreturn(). * This routine assumes anything needed to return has been put in the * frame (only needed for signals queued up while in dynamo), and goes * ahead and trashes the current dcontext. 
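     * In outline (simplified; the code below is authoritative):
     *   copy_frame_to_stack(dcontext, sig, our_frame, xsp, false);        (frame to app stack)
     *   sc->SC_XSP = (ptr_uint_t)xsp;                                     (handler's stack)
     *   transfer_from_sig_handler_to_fcache_return(..., handler_pc, ...); (resume at handler)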
*/ /* if we were building a trace, kill it */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } /* add to set of blocked signals those in sigaction mask */ blocked = info->app_sigaction[sig]->mask; /* SA_NOMASK says whether to block sig itself or not */ if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0) kernel_sigaddset(&blocked, sig); set_blocked(dcontext, &blocked, false /*relative: OR these in*/); /* Doesn't matter what most app registers are, signal handler doesn't * expect anything except the frame on the stack. We do need to set xsp, * only because if app wants special signal stack we need to point xsp * there. (If no special signal stack, this is a nop.) */ sc->SC_XSP = (ptr_uint_t)xsp; /* Set up args to handler: int sig, kernel_siginfo_t *siginfo, * kernel_ucontext_t *ucxt. */ #ifdef X86_64 sc->SC_XDI = sig; sc->SC_XSI = (reg_t) & ((sigframe_rt_t *)xsp)->info; sc->SC_XDX = (reg_t) & ((sigframe_rt_t *)xsp)->uc; #elif defined(AARCHXX) sc->SC_R0 = sig; if (IS_RT_FOR_APP(info, sig)) { sc->SC_R1 = (reg_t) & ((sigframe_rt_t *)xsp)->info; sc->SC_R2 = (reg_t) & ((sigframe_rt_t *)xsp)->uc; } if (sig_has_restorer(info, sig)) sc->SC_LR = (reg_t)info->app_sigaction[sig]->restorer; else sc->SC_LR = (reg_t)dynamorio_sigreturn; # ifndef AARCH64 /* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */ set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE); # endif #endif /* Set our sigreturn context (NOT for the app: we already copied the * translated context to the app stack) to point to fcache_return! * Then we'll go back through kernel, appear in fcache_return, * and go through dispatch & interp, without messing up DR stack. */ transfer_from_sig_handler_to_fcache_return( dcontext, uc, app_sc, sig, /* Make sure handler is next thing we execute */ (app_pc)SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]), (linkstub_t *)get_asynch_linkstub(), true); if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) { /* clear handler now -- can't delete memory since sigreturn, * others may look at sigaction struct, so we just set to default */ info->app_sigaction[sig]->handler = (handler_t)SIG_DFL; } LOG(THREAD, LOG_ASYNCH, 3, "\tset next_tag to handler " PFX ", xsp to " PFX "\n", SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]), xsp); return true; } static bool execute_handler_from_dispatch(dcontext_t *dcontext, int sig) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; byte *xsp = get_sigstack_frame_ptr(dcontext, sig, NULL); sigframe_rt_t *frame = &(info->sigpending[sig]->rt_frame); priv_mcontext_t *mcontext = get_mcontext(dcontext); sigcontext_t *sc; kernel_ucontext_t *uc; kernel_sigset_t blocked; #ifdef CLIENT_INTERFACE dr_signal_action_t action; #else if (info->app_sigaction[sig] == NULL || info->app_sigaction[sig]->handler == (handler_t)SIG_DFL) { LOG(THREAD, LOG_ASYNCH, 3, "\taction is SIG_DFL\n"); execute_default_from_dispatch(dcontext, sig, frame); return true; } ASSERT(info->app_sigaction[sig] != NULL && info->app_sigaction[sig]->handler != (handler_t)SIG_IGN && info->app_sigaction[sig]->handler != (handler_t)SIG_DFL); #endif LOG(THREAD, LOG_ASYNCH, 2, "execute_handler_from_dispatch for signal %d\n", sig); RSTATS_INC(num_signals); /* modify the rtframe before copying to stack so we can pass final * version to client, and propagate its mods */ uc = get_ucontext_from_rt_frame(frame); sc = SIGCXT_FROM_UCXT(uc); /* Because of difficulties determining when/if a signal handler * returns, we do what 
the kernel does: abandon all of our current * state, copy what we might need to the handler frame if we come back, * and then it's ok if the handler doesn't return. * If it does, we start interpreting afresh when we see sigreturn(). */ #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "original sigcontext " PFX ":\n", sc); dump_sigcontext(dcontext, sc); } #endif if (info->sigpending[sig]->use_sigcontext) { LOG(THREAD, LOG_ASYNCH, 2, "%s: using sigcontext, not mcontext (syscall restart)\n", __FUNCTION__); } else { /* copy currently-interrupted-context to frame's context, so we can * abandon the currently-interrupted context. */ mcontext_to_ucontext(uc, mcontext); } /* Sigreturn needs the target ISA mode to be set in the T bit in cpsr. * Since we came from dispatch, the post-signal target's mode is in dcontext. */ IF_ARM(set_pc_mode_in_cpsr(sc, dr_get_isa_mode(dcontext))); /* mcontext does not contain fp or mmx or xmm state, which may have * changed since the frame was created (while finishing up interrupted * fragment prior to returning to dispatch). Since DR does not touch * this state except for xmm on x64, we go ahead and copy the * current state into the frame, and then touch up xmm for x64. */ /* FIXME: should this be done for all pending as soon as reach * dispatch? what if get two asynch inside same frag prior to exiting * cache? have issues with fpstate, but also prob with next_tag? FIXME */ /* FIXME: we should clear fpstate for app handler itself as that's * how our own handler is executed. */ #if defined(LINUX) && defined(X86) ASSERT(sc->fpstate != NULL); /* not doing i#641 yet */ save_fpstate(dcontext, frame); #endif /* LINUX && X86 */ #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "new sigcontext " PFX ":\n", sc); dump_sigcontext(dcontext, sc); LOG(THREAD, LOG_ASYNCH, 3, "\n"); } #endif /* FIXME: other state? debug regs? * if no syscall allowed between master_ (when frame created) and * receiving, then don't have to worry about debug regs, etc. * check for syscall when record pending, if it exists, try to * receive in pre_system_call or something? what if ignorable? FIXME! 
*/ if (!info->sigpending[sig]->use_sigcontext) { /* for the pc we want the app pc not the cache pc */ sc->SC_XIP = (ptr_uint_t)dcontext->next_tag; LOG(THREAD, LOG_ASYNCH, 3, "\tset frame's eip to " PFX "\n", sc->SC_XIP); } #ifdef CLIENT_INTERFACE sigcontext_t sc_interrupted = *sc; action = send_signal_to_client(dcontext, sig, frame, NULL, info->sigpending[sig]->access_address, false /*not blocked*/, NULL); /* in order to pass to the client, we come all the way here for signals * the app has no handler for */ if (action == DR_SIGNAL_REDIRECT) { /* send_signal_to_client copied mcontext into frame's sc */ priv_mcontext_t *mcontext = get_mcontext(dcontext); ucontext_to_mcontext(mcontext, uc); dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP); if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL)); mcontext->pc = dcontext->next_tag; sig_full_cxt_t sc_interrupted_full = { &sc_interrupted, NULL /*not provided*/ }; if (instrument_kernel_xfer(dcontext, DR_XFER_CLIENT_REDIRECT, sc_interrupted_full, NULL, NULL, dcontext->next_tag, mcontext->xsp, osc_empty, mcontext, sig)) dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc); return true; /* don't try another signal */ } else if (action == DR_SIGNAL_SUPPRESS || (info->app_sigaction[sig] != NULL && info->app_sigaction[sig]->handler == (handler_t)SIG_IGN)) { LOG(THREAD, LOG_ASYNCH, 2, "%s: not delivering!\n", (action == DR_SIGNAL_SUPPRESS) ? "client suppressing signal" : "app signal handler is SIG_IGN"); return false; } else if (action == DR_SIGNAL_BYPASS || (info->app_sigaction[sig] == NULL || info->app_sigaction[sig]->handler == (handler_t)SIG_DFL)) { LOG(THREAD, LOG_ASYNCH, 2, "%s: executing default action\n", (action == DR_SIGNAL_BYPASS) ? "client forcing default" : "app signal handler is SIG_DFL"); if (info->sigpending[sig]->use_sigcontext) { /* after the default action we want to go to the sigcontext */ dcontext->next_tag = canonicalize_pc_target(dcontext, (app_pc)sc->SC_XIP); ucontext_to_mcontext(get_mcontext(dcontext), uc); IF_ARM(dr_set_isa_mode(dcontext, get_pc_mode_from_cpsr(sc), NULL)); } execute_default_from_dispatch(dcontext, sig, frame); return true; } CLIENT_ASSERT(action == DR_SIGNAL_DELIVER, "invalid signal event return value"); #endif /* now that we've made all our changes and given the client a * chance to make changes, copy the frame to the appropriate stack * location and convert to non-rt if necessary */ copy_frame_to_stack(dcontext, sig, frame, xsp, true /*pending*/); /* now point at the app's frame */ sc = get_sigcontext_from_app_frame(info, sig, (void *)xsp); ASSERT(info->app_sigaction[sig] != NULL); /* add to set of blocked signals those in sigaction mask */ blocked = info->app_sigaction[sig]->mask; /* SA_NOMASK says whether to block sig itself or not */ if ((info->app_sigaction[sig]->flags & SA_NOMASK) == 0) kernel_sigaddset(&blocked, sig); set_blocked(dcontext, &blocked, false /*relative: OR these in*/); /* if we were building a trace, kill it */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } /* Doesn't matter what most app registers are, signal handler doesn't * expect anything except the frame on the stack. We do need to set xsp. */ mcontext->xsp = (ptr_uint_t)xsp; /* Set up args to handler: int sig, kernel_siginfo_t *siginfo, * kernel_ucontext_t *ucxt. 
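     * (x86-64 SysV calling convention: arg1 in rdi, arg2 in rsi, arg3 in rdx,
     * hence the xdi/xsi/xdx assignments below; AArch32/64 pass them in r0-r2.)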
 */
#ifdef X86_64
    mcontext->xdi = sig;
    mcontext->xsi = (reg_t) & ((sigframe_rt_t *)xsp)->info;
    mcontext->xdx = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
#elif defined(AARCHXX)
    mcontext->r0 = sig;
    if (IS_RT_FOR_APP(info, sig)) {
        mcontext->r1 = (reg_t) & ((sigframe_rt_t *)xsp)->info;
        mcontext->r2 = (reg_t) & ((sigframe_rt_t *)xsp)->uc;
    }
    if (sig_has_restorer(info, sig))
        mcontext->lr = (reg_t)info->app_sigaction[sig]->restorer;
    else
        mcontext->lr = (reg_t)dynamorio_sigreturn;
#endif
#ifdef X86
    /* Clear eflags DF (signal handler should match function entry ABI) */
    mcontext->xflags &= ~EFLAGS_DF;
#endif
    /* Make sure handler is next thing we execute */
    dcontext->next_tag = canonicalize_pc_target(
        dcontext, (app_pc)SIGACT_PRIMARY_HANDLER(info->app_sigaction[sig]));
    if ((info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) {
        /* clear handler now -- can't delete memory since sigreturn,
         * others may look at sigaction struct, so we just set to default
         */
        info->app_sigaction[sig]->handler = (handler_t)SIG_DFL;
    }
#ifdef CLIENT_INTERFACE
    mcontext->pc = dcontext->next_tag;
    sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ };
    if (instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_DELIVERY, sc_full, NULL, NULL,
                               dcontext->next_tag, mcontext->xsp, osc_empty, mcontext,
                               sig))
        dcontext->next_tag = canonicalize_pc_target(dcontext, mcontext->pc);
#endif
    LOG(THREAD, LOG_ASYNCH, 3, "\tset xsp to " PFX "\n", xsp);
    return true;
}

/* The arg to SYS_kill, i.e., the signal number, should be in dcontext->sys_param0 */
/* This routine unblocks signals, but the caller must set the handler to default. */
static void
terminate_via_kill(dcontext_t *dcontext)
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    ASSERT(dcontext == get_thread_private_dcontext());
    /* Ensure signal_thread_exit() will not re-block */
    memset(&info->app_sigblocked, 0, sizeof(info->app_sigblocked));
    /* FIXME PR 541760: there can be multiple thread groups and thus
     * this may not exit all threads in the address space
     */
    block_cleanup_and_terminate(
        dcontext, SYS_kill,
        /* Pass -pid in case main thread has exited
         * in which case will get -ESRCH
         */
        IF_VMX86(os_in_vmkernel_userworld() ? -(int)get_process_id() :)
            get_process_id(),
        dcontext->sys_param0, true, 0, 0);
    ASSERT_NOT_REACHED();
}

bool
is_currently_on_sigaltstack(dcontext_t *dcontext)
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    byte *cur_esp;
    GET_STACK_PTR(cur_esp);
    return (cur_esp >= (byte *)info->sigstack.ss_sp &&
            cur_esp < (byte *)info->sigstack.ss_sp + info->sigstack.ss_size);
}

static void
terminate_via_kill_from_anywhere(dcontext_t *dcontext, int sig)
{
    dcontext->sys_param0 = sig; /* store arg to SYS_kill */
    if (is_currently_on_sigaltstack(dcontext)) {
        /* We can't clean up our sigstack properly when we're on it
         * (i#1160) so we terminate on the dstack.
         */
        call_switch_stack(dcontext, dcontext->dstack,
                          (void (*)(void *))terminate_via_kill, NULL /*!initstack */,
                          false /*no return */);
    } else {
        terminate_via_kill(dcontext);
    }
    ASSERT_NOT_REACHED();
}

/* xref os_request_fatal_coredump() */
void
os_terminate_via_signal(dcontext_t *dcontext, terminate_flags_t flags, int sig)
{
    if (signal_is_interceptable(sig)) {
        bool set_action = false;
#if defined(STATIC_LIBRARY) && defined(LINUX)
        if (INTERNAL_OPTION(invoke_app_on_crash)) {
            /* We come here for asserts. Faults already bypass this routine.
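             * Conceptually, restoring a default action is just sigaction(2)
             * with SIG_DFL; a minimal sketch (not the exact helper used here):
             *   struct sigaction act = { .sa_handler = SIG_DFL };
             *   sigaction(sig, &act, NULL);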
*/ dcontext_t *my_dc = get_thread_private_dcontext(); if (my_dc != NULL) { thread_sig_info_t *info = (thread_sig_info_t *)my_dc->signal_field; if (info != NULL && info->app_sigaction[sig] != NULL && IS_RT_FOR_APP(info, sig)) { set_action = true; sigaction_syscall(sig, info->app_sigaction[sig], NULL); } } } #endif if (!set_action) { DEBUG_DECLARE(bool res =) set_default_signal_action(sig); ASSERT(res); } } if (TEST(TERMINATE_CLEANUP, flags)) { /* we enter from several different places, so rewind until top-level kstat */ KSTOP_REWIND_UNTIL(thread_measured); ASSERT(dcontext != NULL); dcontext->sys_param0 = sig; /* XXX: the comment in the else below implies some systems have SYS_kill * of SIGSEGV w/ no handler on oneself actually return. * cleanup_and_terminate won't return to us and will use global_do_syscall * to invoke SYS_kill, which in debug will do an inf loop (good!) but * in release will do SYS_exit_group -- oh well, the systems I'm testing * on do an immediate exit. */ terminate_via_kill_from_anywhere(dcontext, sig); } else { /* general clean up is unsafe: just remove .1config file */ config_exit(); dynamorio_syscall(SYS_kill, 2, get_process_id(), sig); /* We try both the SYS_kill and the immediate crash since on some platforms * the SIGKILL is delayed and on others the *-1 is hanging(?): should investigate */ if (sig == SIGSEGV) /* make doubly-sure */ *((int *)PTR_UINT_MINUS_1) = 0; while (true) { /* in case signal delivery is delayed we wait...forever */ os_thread_yield(); } } ASSERT_NOT_REACHED(); } static bool execute_default_action(dcontext_t *dcontext, int sig, sigframe_rt_t *frame, sigcontext_t *sc_orig, bool from_dispatch, bool forged) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; sigcontext_t *sc = get_sigcontext_from_rt_frame(frame); byte *pc = (byte *)sc->SC_XIP; LOG(THREAD, LOG_ASYNCH, 3, "execute_default_action for signal %d\n", sig); /* should only come here for signals we catch, or signal with ONESHOT * that didn't sigreturn */ ASSERT(info->we_intercept[sig] || (info->app_sigaction[sig]->flags & SA_ONESHOT) != 0); if (info->app_sigaction[sig] != NULL && (info->app_sigaction[sig]->flags & SA_ONESHOT) != 0) { if (!info->we_intercept[sig]) { handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); info->app_sigaction[sig] = NULL; } } /* FIXME PR 205310: we can't always perfectly emulate the default * behavior. To execute the default action, we have to un-register our * handler, if we have one, for signals whose default action is not * ignore or that will just be re-raised upon returning to the * interrupted context -- FIXME: are any of the ignores repeated? * SIGURG? * * If called from execute_handler_from_cache(), our master_signal_handler() * is going to return directly to the translated context: which means we * go native to re-execute the instr, which if it does in fact generate * the signal again means we have a nice transparent core dump. * * If called from execute_handler_from_dispatch(), we need to generate * the signal ourselves. */ if (default_action[sig] != DEFAULT_IGNORE) { DEBUG_DECLARE(bool ok =) set_default_signal_action(sig); ASSERT(ok); /* FIXME: to avoid races w/ shared handlers should set a flag to * prevent another thread from re-enabling. * Perhaps worse: what if this signal arrives for another thread * in the meantime (and the default is not terminate)? 
*/ if (info->shared_app_sigaction) { LOG(THREAD, LOG_ASYNCH, 1, "WARNING: having to install SIG_DFL for thread " TIDFMT ", but will be " "shared!\n", get_thread_id()); } if (default_action[sig] == DEFAULT_TERMINATE || default_action[sig] == DEFAULT_TERMINATE_CORE) { report_app_problem(dcontext, APPFAULT_CRASH, pc, (byte *)sc->SC_FP, "\nSignal %d delivered to application as default " "action.\n", sig); /* App may call sigaction to set handler SIG_DFL (unnecessary but legal), * in which case DR will put a handler in info->app_sigaction[sig]. * We must clear it, otherwise, signal_thread_exit may cleanup the * handler and set it to SIG_IGN instead. */ if (info->app_sigaction[sig] != NULL) { ASSERT(info->we_intercept[sig]); handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); info->app_sigaction[sig] = NULL; } /* N.B.: we don't have to restore our handler because the * default action is for the process (entire thread group for NPTL) to die! */ if (from_dispatch || can_always_delay[sig] || forged || is_sys_kill(dcontext, pc, (byte *)sc->SC_XSP, &frame->info)) { /* This must have come from SYS_kill rather than raised by * a faulting instruction. Thus we can't go re-execute the * instr in order to re-raise the signal (if from_dispatch, * we delayed and can't re-execute anyway). Instead we * re-generate via SYS_kill. An alternative, if we don't * care about generating a core dump, is to use SYS_exit * and pass the right exit code to indicate the signal * number: that would avoid races w/ the sigaction. * * FIXME: should have app make the syscall to get a more * transparent core dump! */ LOG(THREAD, LOG_ASYNCH, 1, "Terminating via kill\n"); if (!from_dispatch && !forged) KSTOP_NOT_MATCHING_NOT_PROPAGATED(fcache_default); KSTOP_NOT_MATCHING_NOT_PROPAGATED(dispatch_num_exits); if (is_couldbelinking(dcontext)) /* won't be for SYS_kill (i#1159) */ enter_nolinking(dcontext, NULL, false); /* we could be on sigstack so call this version: */ terminate_via_kill_from_anywhere(dcontext, sig); ASSERT_NOT_REACHED(); } else { /* We assume that re-executing the interrupted instr will * re-raise the fault. We could easily be wrong: * xref PR 363811 infinite loop due to memory we * thought was unreadable and thus thought would raise * a signal; xref PR 368277 to improve is_sys_kill(), and the * "forged" parameter that puts us in the if() above. * FIXME PR 205310: we should check whether we come out of * the cache when we expected to terminate! * * An alternative is to abandon transparent core dumps and * do the same explicit SYS_kill we do for from_dispatch. * That would let us clean up DR as well. * FIXME: currently we do not clean up DR for a synchronous * signal death, but we do for asynch. */ /* i#552: cleanup and raise client exit event */ int instr_sz = 0; thread_sig_info_t *info; /* We are on the sigstack now, so assign it to NULL to avoid being * freed during process exit cleanup */ info = (thread_sig_info_t *)dcontext->signal_field; info->sigstack.ss_sp = NULL; /* We enter from several different places, so rewind until * top-level kstat. */ KSTOP_REWIND_UNTIL(thread_measured); /* We try to raise the same signal in app's context so a correct * coredump can be generated. However, the client might change * the code in a way that the corresponding app code won't * raise the signal, so we first check if the app instr is the * same as instr in the cache, and raise the signal (by return). * Otherwise, we kill the process instead. 
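                 * A sketch of that comparison (the real code follows below):
                 *   sz = decode_sizeof(dcontext, app_pc, ...);
                 *   if (sz != 0 && sz == decode_sizeof(dcontext, cache_pc, ...) &&
                 *       memcmp(cache_pc, app_pc, sz) == 0) => identical: re-raise
                 *   by returning; otherwise terminate via SYS_kill.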
                 * XXX: if the PC is unreadable we'll just crash here...should check
                 * for readability safely.
                 */
                ASSERT(sc_orig != NULL);
                instr_sz = decode_sizeof(dcontext, (byte *)sc_orig->SC_XIP,
                                         NULL _IF_X86_64(NULL));
                if (instr_sz != 0 &&
                    pc != NULL && /* avoid crash on xl8 failure (i#1699) */
                    instr_sz == decode_sizeof(dcontext, pc, NULL _IF_X86_64(NULL)) &&
                    memcmp(pc, (byte *)sc_orig->SC_XIP, instr_sz) == 0) {
                    /* the app instr matches the cache instr; cleanup and raise the
                     * signal in the app context
                     */
                    LOG(THREAD, LOG_ASYNCH, 1, "Raising signal by re-executing\n");
                    dynamo_process_exit();
                    /* we cannot re-enter the cache, which is freed by now */
                    ASSERT(!from_dispatch);
                    return false;
                } else {
                    /* mismatch, cleanup and terminate */
                    LOG(THREAD, LOG_ASYNCH, 1, "Terminating via kill\n");
                    dcontext->sys_param0 = sig;
                    terminate_via_kill(dcontext);
                    ASSERT_NOT_REACHED();
                }
            }
        }
    } else {
        /* FIXME PR 297033: in order to intercept DEFAULT_STOP /
         * DEFAULT_CONTINUE signals, we need to set sigcontext to point
         * to some kind of regain-control routine, so that when our
         * thread gets to run again we can reset our handler. So far
         * we have no signals that fall here that we intercept.
         */
        CLIENT_ASSERT(false, "STOP/CONT signals not supported");
    }
#if defined(DEBUG) && defined(INTERNAL)
    if (sig == SIGSEGV && !dynamo_exited) {
        /* pc should be an app pc at this point (it was translated) --
         * check for bad cases here
         */
        if (safe_is_in_fcache(dcontext, pc, (byte *)sc->SC_XSP)) {
            fragment_t wrapper;
            fragment_t *f;
            LOG(THREAD, LOG_ALL, 1,
                "Received SIGSEGV at pc " PFX " in thread " TIDFMT "\n", pc,
                get_thread_id());
            f = fragment_pclookup(dcontext, pc, &wrapper);
            if (f)
                disassemble_fragment(dcontext, f, false);
            ASSERT_NOT_REACHED();
        } else if (in_generated_routine(dcontext, pc)) {
            LOG(THREAD, LOG_ALL, 1,
                "Received SIGSEGV at generated non-code-cache pc " PFX "\n", pc);
            ASSERT_NOT_REACHED();
        }
    }
#endif
    }
    /* now continue at the interruption point and re-raise the signal */
    return true;
}

static bool
execute_default_from_cache(dcontext_t *dcontext, int sig, sigframe_rt_t *frame,
                           sigcontext_t *sc_orig, bool forged)
{
    return execute_default_action(dcontext, sig, frame, sc_orig, false, forged);
}

static void
execute_default_from_dispatch(dcontext_t *dcontext, int sig, sigframe_rt_t *frame)
{
    execute_default_action(dcontext, sig, frame, NULL, true, false);
}

void
receive_pending_signal(dcontext_t *dcontext)
{
    thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field;
    sigpending_t *temp;
    int sig;
    LOG(THREAD, LOG_ASYNCH, 3, "receive_pending_signal\n");
    if (info->interrupted != NULL) {
        relink_interrupted_fragment(dcontext, info);
    }
    /* grab first pending signal
     * XXX: start with real-time ones?
     */
    /* "lock" the array to prevent a new signal that interrupts this bit of
     * code from prepending to or deleting from the array while we're accessing it
     */
    info->accessing_sigpending = true;
    /* barrier to prevent compiler from moving the above write below the loop */
    __asm__ __volatile__("" : : : "memory");
    if (!info->multiple_pending_units &&
        info->num_pending + 2 >= DYNAMO_OPTION(max_pending_signals)) {
        /* We're close to the limit: proactively get a new unit while it's safe
         * to acquire locks. We do that by pushing over the edge.
         * We assume that filling up a 2nd unit is too pathological to plan for.
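         * (The transient alloc/free trio below exists purely for its side
         * effect: allocating past the unit boundary forces the special heap to
         * map a fresh unit now, while it is still safe to take locks.)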
*/ info->multiple_pending_units = true; SYSLOG_INTERNAL_WARNING("many pending signals: asking for 2nd special unit"); sigpending_t *temp1 = special_heap_alloc(info->sigheap); sigpending_t *temp2 = special_heap_alloc(info->sigheap); sigpending_t *temp3 = special_heap_alloc(info->sigheap); special_heap_free(info->sigheap, temp1); special_heap_free(info->sigheap, temp2); special_heap_free(info->sigheap, temp3); } for (sig = 1; sig <= MAX_SIGNUM; sig++) { if (info->sigpending[sig] != NULL) { bool executing = true; /* We do not re-check whether blocked if it was unblocked at * receive time, to properly handle sigsuspend (i#1340). */ if (!info->sigpending[sig]->unblocked && kernel_sigismember(&info->app_sigblocked, sig)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsignal %d is blocked!\n", sig); continue; } LOG(THREAD, LOG_ASYNCH, 3, "\treceiving signal %d\n", sig); /* execute_handler_from_dispatch()'s call to copy_frame_to_stack() is * allowed to remove the front entry from info->sigpending[sig] and * jump to dispatch. */ executing = execute_handler_from_dispatch(dcontext, sig); temp = info->sigpending[sig]; info->sigpending[sig] = temp->next; special_heap_free(info->sigheap, temp); info->num_pending--; /* only one signal at a time! */ if (executing) { /* Make negative so our fcache_enter check makes progress but * our C code still considers there to be pending signals. */ dcontext->signals_pending = -1; break; } } } /* barrier to prevent compiler from moving the below write above the loop */ __asm__ __volatile__("" : : : "memory"); info->accessing_sigpending = false; /* we only clear this on a call to us where we find NO pending signals */ if (sig > MAX_SIGNUM) { LOG(THREAD, LOG_ASYNCH, 3, "\tclearing signals_pending flag\n"); dcontext->signals_pending = 0; } } /* Returns false if should NOT issue syscall. */ bool #ifdef LINUX handle_sigreturn(dcontext_t *dcontext, bool rt) #else handle_sigreturn(dcontext_t *dcontext, void *ucxt_param, int style) #endif { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; sigcontext_t *sc = NULL; /* initialize to satisfy Mac clang */ kernel_ucontext_t *ucxt = NULL; int sig = 0; app_pc next_pc; /* xsp was put in mcontext prior to pre_system_call() */ reg_t xsp = get_mcontext(dcontext)->xsp; #ifdef MACOS bool rt = true; #endif LOG(THREAD, LOG_ASYNCH, 3, "%ssigreturn()\n", rt ? "rt_" : ""); LOG(THREAD, LOG_ASYNCH, 3, "\txsp is " PFX "\n", xsp); #ifdef PROGRAM_SHEPHERDING /* if (!sig_has_restorer, region was never added to exec list, * allowed as pattern only and kicked off at first write via * selfmod detection or otherwise if vsyscall, so no worries * about having to remove it here */ #endif /* The easiest way to set all the non-GPR state that DR does not separately * preserve is to actually execute the sigreturn syscall, so we set up to do * that. We do not want to change DR's signal state, however, so we set it * back to DR's values after processing the state for the app. */ kernel_sigset_t our_mask; sigprocmask_syscall(SIG_SETMASK, NULL, &our_mask, sizeof(our_mask)); /* get sigframe: it's the top thing on the stack, except the ret * popped off pretcode. * WARNING: handler for tcsh's window_change (SIGWINCH) clobbers its * signal # arg, so don't use frame->sig! 
(kernel doesn't look at sig * so app can get away with it) */ if (rt) { #ifdef LINUX sigframe_rt_t *frame = (sigframe_rt_t *)(xsp IF_X86(-sizeof(char *))); /* use si_signo instead of sig, less likely to be clobbered by app */ sig = frame->info.si_signo; # ifdef X86_32 LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n", sig, frame->sig); if (frame->sig != sig) LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n"); # endif sc = get_sigcontext_from_app_frame(info, sig, (void *)frame); ucxt = &frame->uc; #elif defined(MACOS) /* The initial frame fields on the stack are messed up due to * params to handler from tramp, so use params to syscall. * XXX: we don't have signal # though: so we have to rely on app * not clobbering the sig param field. */ sig = *(int *)xsp; LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d\n", sig); ucxt = (kernel_ucontext_t *)ucxt_param; if (ucxt == NULL) { /* On Mac the kernel seems to store state on whether the process is * on the altstack, so longjmp calls _sigunaltstack() which issues a * sigreturn syscall telling the kernel about the altstack change, * with a NULL context. */ LOG(THREAD, LOG_ASYNCH, 3, "\tsigunalstack sigreturn: no context\n"); return true; } sc = SIGCXT_FROM_UCXT(ucxt); #endif ASSERT(sig > 0 && sig <= MAX_SIGNUM && IS_RT_FOR_APP(info, sig)); /* Re-set sigstack from the value stored in the frame. Silently ignore failure, * just like the kernel does. */ uint ignored; /* The kernel checks for being on the stack *after* swapping stacks, so pass * sc->SC_XSP as the current stack. */ handle_sigaltstack(dcontext, &ucxt->uc_stack, NULL, sc->SC_XSP, &ignored); /* Restore DR's so sigreturn syscall won't change it. */ ucxt->uc_stack = info->sigstack; /* FIXME: what if handler called sigaction and requested rt * when itself was non-rt? */ /* Discard blocked signals, re-set from prev mask stored in frame. */ set_blocked(dcontext, SIGMASK_FROM_UCXT(ucxt), true /*absolute*/); /* Restore DR's so sigreturn syscall won't change it. */ *SIGMASK_FROM_UCXT(ucxt) = our_mask; } #if defined(LINUX) && !defined(X64) else { /* FIXME: libc's restorer pops prior to calling sigreturn, I have * no idea why, but kernel asks for xsp-8 not xsp-4...weird! */ kernel_sigset_t prevset; sigframe_plain_t *frame = (sigframe_plain_t *)(xsp IF_X86(-8)); /* We don't trust frame->sig (app sometimes clobbers it), and for * plain frame there's no other place that sig is stored, * so as a hack we added a new frame! * FIXME: this means we won't support nonstandard use of SYS_sigreturn, * e.g., as NtContinue, if frame didn't come from a real signal and so * wasn't copied to stack by us. */ sig = frame->sig_noclobber; LOG(THREAD, LOG_ASYNCH, 3, "\tsignal was %d (did == param %d)\n", sig, IF_X86_ELSE(frame->sig, 0)); # ifdef X86_32 if (frame->sig != sig) LOG(THREAD, LOG_ASYNCH, 1, "WARNING: app sig handler clobbered sig param\n"); # endif ASSERT(sig > 0 && sig <= MAX_SIGNUM && !IS_RT_FOR_APP(info, sig)); sc = get_sigcontext_from_app_frame(info, sig, (void *)frame); /* discard blocked signals, re-set from prev mask stored in frame */ prevset.sig[0] = frame->IF_X86_ELSE(sc.oldmask, uc.uc_mcontext.oldmask); if (_NSIG_WORDS > 1) { memcpy(&prevset.sig[1], &frame->IF_X86_ELSE(extramask, uc.sigset_ex), sizeof(prevset.sig[1])); } # ifdef ARM ucxt = &frame->uc; /* we leave ucxt NULL for x86: not needed there */ # endif set_blocked(dcontext, &prevset, true /*absolute*/); /* Restore DR's so sigreturn syscall won't change it. 
*/ convert_rt_mask_to_nonrt(frame, &our_mask); } #endif /* LINUX */ /* Make sure we deliver pending signals that are now unblocked. */ check_signals_pending(dcontext, info); /* if we were building a trace, kill it */ if (is_building_trace(dcontext)) { LOG(THREAD, LOG_ASYNCH, 3, "\tsquashing trace-in-progress\n"); trace_abort(dcontext); } /* Defensively check for NULL. * XXX i#3182: It did happen but it is not clear how. */ if (info->app_sigaction[sig] != NULL && TEST(SA_ONESHOT, info->app_sigaction[sig]->flags)) { ASSERT(info->app_sigaction[sig]->handler == (handler_t)SIG_DFL); if (!info->we_intercept[sig]) { /* let kernel do default independent of us */ handler_free(dcontext, info->app_sigaction[sig], sizeof(kernel_sigaction_t)); info->app_sigaction[sig] = NULL; } } ASSERT(!safe_is_in_fcache(dcontext, (app_pc)sc->SC_XIP, (byte *)sc->SC_XSP)); #ifdef CLIENT_INTERFACE sig_full_cxt_t sc_full = { sc, NULL /*not provided*/ }; get_mcontext(dcontext)->pc = dcontext->next_tag; instrument_kernel_xfer(dcontext, DR_XFER_SIGNAL_RETURN, osc_empty, NULL, get_mcontext(dcontext), (app_pc)sc->SC_XIP, sc->SC_XSP, sc_full, NULL, sig); #endif #ifdef DEBUG if (stats->loglevel >= 3 && (stats->logmask & LOG_ASYNCH) != 0) { LOG(THREAD, LOG_ASYNCH, 3, "returning-to sigcontext " PFX ":\n", sc); dump_sigcontext(dcontext, sc); } #endif /* XXX i#1206: if we interrupted a non-ignorable syscall to run the app's * handler, and we set up to restart the syscall, we'll come here with the * translated syscall pc -- thus we can't distinguish from a signal interrupting * the prior app instr. So we can't simply point at do_syscall and call * set_at_syscall -- we have to re-interpret the syscall and re-run the * pre-syscall handler. Hopefully all our pre-syscall handlers can handle that. */ /* set up for dispatch */ /* we have to use a different slot since next_tag ends up holding the do_syscall * entry when entered from dispatch (we're called from * pre_syscall, prior to entering cache) */ dcontext->asynch_target = canonicalize_pc_target( dcontext, (app_pc)(sc->SC_XIP IF_ARM(| (TEST(EFLAGS_T, sc->SC_XFLAGS) ? 1 : 0)))); next_pc = dcontext->asynch_target; #ifdef VMX86_SERVER /* PR 404712: kernel only restores gp regs so we do it ourselves and avoid * complexities of kernel's non-linux-like sigreturn semantics */ sig_full_cxt_t sc_full = { sc, NULL }; /* non-ARM so NULL ok */ sigcontext_to_mcontext(get_mcontext(dcontext), &sc_full, DR_MC_ALL); #else /* HACK to get eax put into mcontext AFTER do_syscall */ dcontext->next_tag = (app_pc)sc->IF_X86_ELSE(SC_XAX, SC_R0); /* use special linkstub so we know why we came out of the cache */ sc->IF_X86_ELSE(SC_XAX, SC_R0) = (ptr_uint_t)get_asynch_linkstub(); /* set our sigreturn context to point to fcache_return */ /* We don't need PC_AS_JMP_TGT b/c the kernel uses EFLAGS_T for the mode */ sc->SC_XIP = (ptr_uint_t)fcache_return_routine(dcontext); /* if we overlaid inner frame on nested signal, will end up with this * error -- disable in release build since this is often app's fault (stack * too small) * FIXME: how make this transparent? what ends up happening is that we * get a segfault when we start interpreting dispatch, we want to make it * look like whatever would happen to the app... */ ASSERT((app_pc)sc->SC_XIP != next_pc); # ifdef AARCHXX set_stolen_reg_val(get_mcontext(dcontext), get_sigcxt_stolen_reg(sc)); set_sigcxt_stolen_reg(sc, (reg_t)*get_dr_tls_base_addr()); # ifdef AARCH64 /* On entry to the do_syscall gencode, we save X1 into TLS_REG1_SLOT. 
* Then the sigreturn would redirect the flow to the fcache_return gencode. * In fcache_return it recovers the values of x0 and x1 from TLS_SLOT 0 and 1. */ get_mcontext(dcontext)->r1 = sc->regs[1]; # else /* We're going to our fcache_return gencode which uses DEFAULT_ISA_MODE */ set_pc_mode_in_cpsr(sc, DEFAULT_ISA_MODE); # endif # endif #endif LOG(THREAD, LOG_ASYNCH, 3, "set next tag to " PFX ", sc->SC_XIP to " PFX "\n", next_pc, sc->SC_XIP); return IF_VMX86_ELSE(false, true); } bool is_signal_restorer_code(byte *pc, size_t *len) { /* is this a sigreturn pattern placed by kernel on the stack or vsyscall page? * for non-rt frame: * 0x58 popl %eax * 0xb8 <sysnum> movl SYS_sigreturn, %eax * 0xcd 0x80 int 0x80 * for rt frame: * 0xb8 <sysnum> movl SYS_rt_sigreturn, %eax * 0xcd 0x80 int 0x80 */ /* optimized we only need two uint reads, but we have to do * some little-endian byte-order reverses to get the right result */ #define reverse(x) \ ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \ (((x)&0xff000000) >> 24)) #ifdef MACOS # define SYS_RT_SIGRET SYS_sigreturn #else # define SYS_RT_SIGRET SYS_rt_sigreturn #endif #ifndef X64 /* 58 b8 s4 s3 s2 s1 cd 80 */ static const uint non_rt_1w = reverse(0x58b80000 | (reverse(SYS_sigreturn) >> 16)); static const uint non_rt_2w = reverse((reverse(SYS_sigreturn) << 16) | 0xcd80); #endif /* b8 s4 s3 s2 s1 cd 80 XX */ static const uint rt_1w = reverse(0xb8000000 | (reverse(SYS_RT_SIGRET) >> 8)); static const uint rt_2w = reverse((reverse(SYS_RT_SIGRET) << 24) | 0x00cd8000); /* test rt first as it's the most common * only 7 bytes here so we ignore the last one (becomes msb since little-endian) */ if (*((uint *)pc) == rt_1w && (*((uint *)(pc + 4)) & 0x00ffffff) == rt_2w) { if (len != NULL) *len = 7; return true; } #ifndef X64 if (*((uint *)pc) == non_rt_1w && *((uint *)(pc + 4)) == non_rt_2w) { if (len != NULL) *len = 8; return true; } #endif return false; } void os_forge_exception(app_pc target_pc, dr_exception_type_t type) { /* PR 205136: * We want to deliver now, and the caller expects us not to return. * We have two alternatives: * 1) Emulate stack frame, and call transfer_to_dispatch() for delivery. We * may not know how to fill out every field of the frame (cr2, etc.). Plus, * we have problems w/ default actions (PR 205310) but we have to solve * those long-term anyway. We also have to create different frames based on * whether app intercepts via rt or not. * 2) Call SYS_tgkill from a special location that our handler can * recognize and know it's a signal meant for the app and that the * interrupted DR can be discarded. We'd then essentially repeat 1, * but modifying the kernel-generated frame. We'd have to always * intercept SIGILL. * I'm going with #1 for now b/c the common case is simpler. 
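 * Approach #1 in outline (simplified; the code below is authoritative):
 *   memset(frame, 0, sizeof(*frame));       (build an rt frame from scratch)
 *   frame->info.si_signo = sig;             (fill in what we can)
 *   sc->SC_XIP = (reg_t)target_pc;          (the forged fault PC)
 *   record_pending_signal(..., true for forged, ...);
 *   transfer_to_dispatch(...);              (deliver; never returns)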
*/ dcontext_t *dcontext = get_thread_private_dcontext(); #if defined(LINUX) && defined(X86) thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; #endif char frame_no_xstate[sizeof(sigframe_rt_t)]; sigframe_rt_t *frame = (sigframe_rt_t *)frame_no_xstate; int sig; dr_where_am_i_t cur_whereami = dcontext->whereami; kernel_ucontext_t *uc = get_ucontext_from_rt_frame(frame); sigcontext_t *sc = SIGCXT_FROM_UCXT(uc); switch (type) { case ILLEGAL_INSTRUCTION_EXCEPTION: sig = SIGILL; break; case UNREADABLE_MEMORY_EXECUTION_EXCEPTION: sig = SIGSEGV; break; case SINGLE_STEP_EXCEPTION: ASSERT_NOT_IMPLEMENTED(false); /* FIXME: i#2144 */ case IN_PAGE_ERROR_EXCEPTION: /* fall-through: Windows only */ default: ASSERT_NOT_REACHED(); sig = SIGSEGV; break; } LOG(GLOBAL, LOG_ASYNCH, 1, "os_forge_exception sig=%d\n", sig); /* Since we always delay delivery, we always want an rt frame. we'll convert * to a plain frame on delivery. */ memset(frame, 0, sizeof(*frame)); frame->info.si_signo = sig; /* Set si_code to match what would happen natively. We also need this to * avoid the !is_sys_kill() check in record_pending_signal() to avoid an * infinite loop (i#3171). */ frame->info.si_code = IF_LINUX_ELSE(SI_KERNEL, 0); frame->info.si_addr = target_pc; #ifdef X86_32 frame->sig = sig; frame->pinfo = &frame->info; frame->puc = (void *)&frame->uc; #endif #if defined(LINUX) && defined(X86) /* We use a TLS buffer to avoid too much stack space here. */ sc->fpstate = (kernel_fpstate_t *)get_xstate_buffer(dcontext); #endif mcontext_to_ucontext(uc, get_mcontext(dcontext)); sc->SC_XIP = (reg_t)target_pc; /* We'll fill in fpstate at delivery time. * We fill in segment registers to their current values and assume they won't * change and that these are the right values. * * FIXME i#2095: restore the app's segment register value(s). * * XXX: it seems to work w/o filling in the other state: * I'm leaving cr2 and other fields all zero. * If this gets problematic we could switch to approach #2. */ thread_set_segment_registers(sc); #if defined(X86) && defined(LINUX) if (sig_has_restorer(info, sig)) frame->pretcode = (char *)info->app_sigaction[sig]->restorer; else frame->pretcode = (char *)dynamorio_sigreturn; #endif /* We assume that we do not need to translate the context when forged. * If we did, we'd move this below enter_nolinking() (and update * record_pending_signal() to do the translation). */ record_pending_signal(dcontext, sig, &frame->uc, frame, true /*forged*/ _IF_CLIENT(NULL)); /* For most callers this is not necessary and we only do it to match * the Windows usage model: but for forging from our own handler, * this is good b/c it resets us to the base of dstack. */ /* tell dispatch() why we're coming there */ dcontext->whereami = DR_WHERE_TRAMPOLINE; KSTART(dispatch_num_exits); set_last_exit(dcontext, (linkstub_t *)get_asynch_linkstub()); if (is_couldbelinking(dcontext)) enter_nolinking(dcontext, NULL, false); transfer_to_dispatch( dcontext, get_mcontext(dcontext), cur_whereami != DR_WHERE_FCACHE && cur_whereami != DR_WHERE_SIGNAL_HANDLER /*full_DR_state*/); ASSERT_NOT_REACHED(); } void os_request_fatal_coredump(const char *msg) { /* To enable getting a coredump just make sure that rlimits are * not preventing getting one, e.g. 
ulimit -c unlimited */ SYSLOG_INTERNAL_ERROR("Crashing the process deliberately for a core dump!"); os_terminate_via_signal(NULL, 0 /*no cleanup*/, SIGSEGV); ASSERT_NOT_REACHED(); } void os_request_live_coredump(const char *msg) { #ifdef VMX86_SERVER if (os_in_vmkernel_userworld()) { vmk_request_live_coredump(msg); return; } #endif LOG(GLOBAL, LOG_ASYNCH, 1, "LiveCoreDump unsupported (PR 365105). " "Continuing execution without a core.\n"); return; } void os_dump_core(const char *msg) { /* FIXME Case 3408: fork stack dump crashes on 2.6 kernel, so moving the getchar * ahead to aid in debugging */ if (TEST(DUMPCORE_WAIT_FOR_DEBUGGER, dynamo_options.dumpcore_mask)) { SYSLOG_INTERNAL_ERROR("looping so you can use gdb to attach to pid %s", get_application_pid()); IF_CLIENT_INTERFACE(SYSLOG(SYSLOG_CRITICAL, WAITING_FOR_DEBUGGER, 2, get_application_name(), get_application_pid())); /* getchar() can hit our own vsyscall hook (from PR 212570); typically we * want to attach and not continue anyway, so doing an infinite loop: */ while (true) os_thread_yield(); } if (DYNAMO_OPTION(live_dump)) { os_request_live_coredump(msg); } if (TEST(DUMPCORE_INCLUDE_STACKDUMP, dynamo_options.dumpcore_mask)) { /* fork, dump core, then use gdb to get a stack dump * we can get into an infinite loop if there's a seg fault * in the process of doing this -- so we have a do-once test, * and if it failed we do the no-symbols dr callstack dump */ static bool tried_stackdump = false; if (!tried_stackdump) { tried_stackdump = true; stackdump(); } else { static bool tried_calldump = false; if (!tried_calldump) { tried_calldump = true; dump_dr_callstack(STDERR); } } } if (!DYNAMO_OPTION(live_dump)) { os_request_fatal_coredump(msg); ASSERT_NOT_REACHED(); } } #ifdef RETURN_AFTER_CALL bool at_known_exception(dcontext_t *dcontext, app_pc target_pc, app_pc source_fragment) { /* There is a known exception in signal restorers and the Linux * dynamic symbol resoulution. * The latter we assume it is the only other recurring known exception, * so the first time we pattern match to help make sure it is indeed * _dl_runtime_resolve (since with LD_BIND_NOW it will never be called). * After that we compare with the known value. */ static app_pc known_exception = 0; thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; LOG(THREAD, LOG_INTERP, 1, "RCT: testing for KNOWN exception " PFX " " PFX "\n", target_pc, source_fragment); /* Check if this is a signal return. FIXME: we should really get that from the frame itself. Since currently grabbing restorer only when copying a frame, this will work with nested signals only if they all have same restorer (I haven't seen restorers other than the one in libc) */ if (target_pc == info->signal_restorer_retaddr) { LOG(THREAD, LOG_INTERP, 1, "RCT: KNOWN exception this is a signal restorer --ok \n"); STATS_INC(ret_after_call_signal_restorer); return true; } if (source_fragment == known_exception) { LOG(THREAD, LOG_INTERP, 1, "RCT: KNOWN exception again _dl_runtime_resolve --ok\n"); return true; } if (known_exception == 0) { int ret_imm; return at_dl_runtime_resolve_ret(dcontext, source_fragment, &ret_imm); } return false; } #endif /* RETURN_AFTER_CALL */ /*************************************************************************** * ITIMERS * * We support combining an app itimer with a DR itimer for each of the 3 types * (PR 204556). 
*/ static inline uint64 timeval_to_usec(struct timeval *t1) { return ((uint64)(t1->tv_sec)) * 1000000 + t1->tv_usec; } static inline void usec_to_timeval(uint64 usec, struct timeval *t1) { t1->tv_sec = (long)usec / 1000000; t1->tv_usec = (long)usec % 1000000; } static void init_itimer(dcontext_t *dcontext, bool first) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int i; ASSERT(info != NULL); ASSERT(!info->shared_itimer); /* else inherit */ LOG(THREAD, LOG_ASYNCH, 2, "thread has private itimers%s\n", os_itimers_thread_shared() ? " (for now)" : ""); if (os_itimers_thread_shared()) { /* we have to allocate now even if no itimer is installed until later, * so that all child threads point to the same data */ info->itimer = (thread_itimer_info_t(*)[NUM_ITIMERS])global_heap_alloc( sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); } else { /* for simplicity and parallel w/ shared we allocate proactively */ info->itimer = (thread_itimer_info_t(*)[NUM_ITIMERS])heap_alloc( dcontext, sizeof(*info->itimer) HEAPACCT(ACCT_OTHER)); } memset(info->itimer, 0, sizeof(*info->itimer)); for (i = 0; i < NUM_ITIMERS; i++) { ASSIGN_INIT_RECURSIVE_LOCK_FREE((*info->itimer)[i].lock, shared_itimer_lock); } if (first) { /* see if app has set up an itimer before we were loaded */ struct itimerval prev; int rc; int which; for (which = 0; which < NUM_ITIMERS; which++) { rc = getitimer_syscall(which, &prev); ASSERT(rc == SUCCESS); (*info->itimer)[which].app.interval = timeval_to_usec(&prev.it_interval); (*info->itimer)[which].app.value = timeval_to_usec(&prev.it_value); } } } /* Up to caller to hold lock for shared itimers */ static bool set_actual_itimer(dcontext_t *dcontext, int which, thread_sig_info_t *info, bool enable) { struct itimerval val; int rc; ASSERT(info != NULL && info->itimer != NULL); ASSERT(which >= 0 && which < NUM_ITIMERS); if (enable) { LOG(THREAD, LOG_ASYNCH, 2, "installing itimer %d interval=" INT64_FORMAT_STRING ", value=" INT64_FORMAT_STRING "\n", which, (*info->itimer)[which].actual.interval, (*info->itimer)[which].actual.value); /* i#2907: we have no signal handlers until we start the app (i#2335) * so we can't set up an itimer until then. 
*/ ASSERT(dynamo_initialized); ASSERT(!info->shared_itimer || self_owns_recursive_lock(&(*info->itimer)[which].lock)); usec_to_timeval((*info->itimer)[which].actual.interval, &val.it_interval); usec_to_timeval((*info->itimer)[which].actual.value, &val.it_value); } else { LOG(THREAD, LOG_ASYNCH, 2, "disabling itimer %d\n", which); memset(&val, 0, sizeof(val)); (*info->itimer)[which].actual.value = 0; (*info->itimer)[which].actual.interval = 0; } rc = setitimer_syscall(which, &val, NULL); return (rc == SUCCESS); } /* Caller should hold lock */ static bool itimer_new_settings(dcontext_t *dcontext, int which, bool app_changed) { struct itimerval val; bool res = true; int rc; thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); ASSERT(which >= 0 && which < NUM_ITIMERS); ASSERT(!info->shared_itimer || self_owns_recursive_lock(&(*info->itimer)[which].lock)); /* the general strategy is to set the actual value to the smaller, * update the larger on each signal, and when the larger becomes * smaller do a one-time swap for the remaining */ if ((*info->itimer)[which].dr.interval > 0 && ((*info->itimer)[which].app.interval == 0 || (*info->itimer)[which].dr.interval < (*info->itimer)[which].app.interval)) (*info->itimer)[which].actual.interval = (*info->itimer)[which].dr.interval; else (*info->itimer)[which].actual.interval = (*info->itimer)[which].app.interval; if ((*info->itimer)[which].actual.value > 0) { if ((*info->itimer)[which].actual.interval == 0 && (*info->itimer)[which].dr.value == 0 && (*info->itimer)[which].app.value == 0) { (*info->itimer)[which].actual.value = 0; res = set_actual_itimer(dcontext, which, info, false /*disabled*/); } else { /* one of app or us has an in-flight timer which we should not interrupt. * but, we already set the new requested value (for app or us), so we * need to update the actual value so we subtract properly. 
*/ rc = getitimer_syscall(which, &val); ASSERT(rc == SUCCESS); uint64 left = timeval_to_usec(&val.it_value); if (!app_changed && (*info->itimer)[which].actual.value == (*info->itimer)[which].app.value) (*info->itimer)[which].app.value = left; if (app_changed && (*info->itimer)[which].actual.value == (*info->itimer)[which].dr.value) (*info->itimer)[which].dr.value = left; (*info->itimer)[which].actual.value = left; } } else { if ((*info->itimer)[which].dr.value > 0 && ((*info->itimer)[which].app.value == 0 || (*info->itimer)[which].dr.value < (*info->itimer)[which].app.value)) (*info->itimer)[which].actual.value = (*info->itimer)[which].dr.value; else { (*info->itimer)[which].actual.value = (*info->itimer)[which].app.value; } res = set_actual_itimer(dcontext, which, info, true /*enable*/); } return res; } bool set_itimer_callback(dcontext_t *dcontext, int which, uint millisec, void (*func)(dcontext_t *, priv_mcontext_t *), void (*func_api)(dcontext_t *, dr_mcontext_t *)) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; bool rc; if (which < 0 || which >= NUM_ITIMERS) { CLIENT_ASSERT(false, "invalid itimer type"); return false; } if (func == NULL && func_api == NULL && millisec != 0) { CLIENT_ASSERT(false, "invalid function"); return false; } ASSERT(info != NULL && info->itimer != NULL); if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); (*info->itimer)[which].dr.interval = ((uint64)millisec) * 1000; (*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval; (*info->itimer)[which].cb = func; (*info->itimer)[which].cb_api = func_api; if (!dynamo_initialized) { /* i#2907: we have no signal handlers until we start the app (i#2335) * so we can't set up an itimer until then. start_itimer() called * from os_thread_under_dynamo() will enable it. 
*/ LOG(THREAD, LOG_ASYNCH, 2, "delaying itimer until attach\n"); rc = true; } else rc = itimer_new_settings(dcontext, which, false /*us*/); if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); return rc; } uint get_itimer_frequency(dcontext_t *dcontext, int which) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; uint ms = 0; if (which < 0 || which >= NUM_ITIMERS) { CLIENT_ASSERT(false, "invalid itimer type"); return 0; } ASSERT(info != NULL && info->itimer != NULL); if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); ms = (*info->itimer)[which].dr.interval / 1000; if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); return ms; } static int signal_to_itimer_type(int sig) { if (sig == SIGALRM) return ITIMER_REAL; else if (sig == SIGVTALRM) return ITIMER_VIRTUAL; else if (sig == SIGPROF) return ITIMER_PROF; else return -1; } static bool alarm_signal_has_DR_only_itimer(dcontext_t *dcontext, int signal) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; int which = signal_to_itimer_type(signal); if (which == -1) return false; if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); bool DR_only = ((*info->itimer)[which].dr.value > 0 || (*info->itimer)[which].dr.interval > 0) && (*info->itimer)[which].app.value == 0 && (*info->itimer)[which].app.interval == 0; if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); return DR_only; } static bool handle_alarm(dcontext_t *dcontext, int sig, kernel_ucontext_t *ucxt) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); int which = 0; bool invoke_cb = false, pass_to_app = false, reset_timer_manually = false; bool should_release_lock = false; /* i#471: suppress alarms coming in after exit */ if (dynamo_exited) return pass_to_app; which = signal_to_itimer_type(sig); ASSERT(which != -1); LOG(THREAD, LOG_ASYNCH, 2, "received alarm %d @" PFX "\n", which, SIGCXT_FROM_UCXT(ucxt)->SC_XIP); /* This alarm could have interrupted an app thread making an itimer syscall, * which is why we don't want to block on a lock here. * It can't interrupt this same thread handling a prior alarm (b/c we block * the signal in our handler). It could arrive in thread B while thread A * is still handling a prior alarm if the alarm frequency is high and the * processing is slow, which is why we split the locks to be per-itimer-type. * We also avoid new thread setup code acquiring these itimer locks by using * atomic increments instead for the refcounts. Xref i#2993. */ if (info->shared_itimer) { #ifdef DEADLOCK_AVOIDANCE /* i#2061: in debug build we can get an alarm while in deadlock handling * code that holds innermost_lock. We just drop such alarms. */ if (OWN_MUTEX(&innermost_lock)) return pass_to_app; #endif if (self_owns_recursive_lock(&(*info->itimer)[which].lock)) { /* What can we do? We just go ahead and hope conflicting writes work out. * We don't re-acquire in case app was in middle of acquiring. */ } else { #define ALARM_LOCK_MAX_TRIES 4 int i; for (i = 0; i < ALARM_LOCK_MAX_TRIES; ++i) { if (try_recursive_lock(&(*info->itimer)[which].lock)) { should_release_lock = true; break; } os_thread_yield(); } if (!should_release_lock) { /* Heuristic: if fail N times then assume interrupted lock routine * while processing an app syscall (see above: we ruled out other * scenarios). What can we do? Just continue and hope conflicting * writes work out. 
*/ } } } if ((*info->itimer)[which].app.value > 0) { /* Alarm could have been on its way when app value changed */ if ((*info->itimer)[which].app.value >= (*info->itimer)[which].actual.value) { (*info->itimer)[which].app.value -= (*info->itimer)[which].actual.value; LOG(THREAD, LOG_ASYNCH, 2, "\tapp value is now %d\n", (*info->itimer)[which].app.value); if ((*info->itimer)[which].app.value == 0) { pass_to_app = true; (*info->itimer)[which].app.value = (*info->itimer)[which].app.interval; } else reset_timer_manually = true; } } if ((*info->itimer)[which].dr.value > 0) { /* Alarm could have been on its way when DR value changed */ if ((*info->itimer)[which].dr.value >= (*info->itimer)[which].actual.value) { (*info->itimer)[which].dr.value -= (*info->itimer)[which].actual.value; LOG(THREAD, LOG_ASYNCH, 2, "\tdr value is now %d\n", (*info->itimer)[which].dr.value); if ((*info->itimer)[which].dr.value == 0) { invoke_cb = true; (*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval; } else reset_timer_manually = true; } } /* for efficiency we let the kernel reset the value to interval if * there's only one timer */ if (reset_timer_manually) { (*info->itimer)[which].actual.value = 0; itimer_new_settings(dcontext, which, true /*doesn't matter: actual.value==0*/); } else (*info->itimer)[which].actual.value = (*info->itimer)[which].actual.interval; if (invoke_cb) { /* invoke after setting new itimer value */ /* we save stack space by allocating superset dr_mcontext_t */ dr_mcontext_t dmc; dr_mcontext_init(&dmc); priv_mcontext_t *mc = dr_mcontext_as_priv_mcontext(&dmc); ucontext_to_mcontext(mc, ucxt); void (*cb)(dcontext_t *, priv_mcontext_t *) = (*info->itimer)[which].cb; void (*cb_api)(dcontext_t *, dr_mcontext_t *) = (*info->itimer)[which].cb_api; if (which == ITIMER_VIRTUAL && info->shared_itimer && should_release_lock) { release_recursive_lock(&(*info->itimer)[which].lock); should_release_lock = false; } if (cb != NULL) { cb(dcontext, mc); } else { cb_api(dcontext, &dmc); } } if (info->shared_itimer && should_release_lock) release_recursive_lock(&(*info->itimer)[which].lock); return pass_to_app; } /* Starts itimer if stopped, or increases refcount of existing itimer if already * started. It is *not* safe to call this more than once for the same thread, * since it will inflate the refcount and prevent cleanup. */ void start_itimer(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); bool start = false; if (info->shared_itimer) { /* i#2993: We avoid acquiring the lock as an alarm signal can arrive during * the lock routine (esp in debug build) and cause problems. */ int new_count = atomic_add_exchange_int((volatile int *)info->shared_itimer_underDR, 1); start = (new_count == 1); } else start = true; if (start) { /* Enable all DR itimers b/c at least one thread in this set of threads * sharing itimers is under DR control */ int which; LOG(THREAD, LOG_ASYNCH, 2, "starting DR itimers from thread " TIDFMT "\n", get_thread_id()); for (which = 0; which < NUM_ITIMERS; which++) { if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); /* May have already been set up with the start delayed (i#2907). 
*/ if ((*info->itimer)[which].dr.interval > 0) { (*info->itimer)[which].dr.value = (*info->itimer)[which].dr.interval; itimer_new_settings(dcontext, which, false /*!app*/); } if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); } } } /* Decrements the itimer refcount, and turns off the itimer once there are no * more threads listening for it. It is not safe to call this more than once on * the same thread. */ void stop_itimer(dcontext_t *dcontext) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); bool stop = false; if (info->shared_itimer) { ASSERT(*info->shared_itimer_underDR > 0); int new_count = atomic_add_exchange_int((volatile int *)info->shared_itimer_underDR, -1); stop = (new_count == 0); } else stop = true; if (stop) { /* Disable all DR itimers b/c this set of threads sharing this * itimer is now completely native */ int which; LOG(THREAD, LOG_ASYNCH, 2, "stopping DR itimers from thread " TIDFMT "\n", get_thread_id()); for (which = 0; which < NUM_ITIMERS; which++) { if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); if ((*info->itimer)[which].dr.value > 0) { (*info->itimer)[which].dr.value = 0; if ((*info->itimer)[which].app.value > 0) { (*info->itimer)[which].actual.interval = (*info->itimer)[which].app.interval; } else set_actual_itimer(dcontext, which, info, false /*disable*/); } if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); } } } /* handle app itimer syscalls */ /* handle_pre_alarm also calls this function and passes NULL as prev_timer */ void handle_pre_setitimer(dcontext_t *dcontext, int which, const struct itimerval *new_timer, struct itimerval *prev_timer) { if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS) return; thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); struct itimerval val; if (safe_read(new_timer, sizeof(val), &val)) { if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); /* save a copy in case the syscall fails */ (*info->itimer)[which].app_saved = (*info->itimer)[which].app; (*info->itimer)[which].app.interval = timeval_to_usec(&val.it_interval); (*info->itimer)[which].app.value = timeval_to_usec(&val.it_value); LOG(THREAD, LOG_ASYNCH, 2, "app setitimer type=%d interval=" SZFMT " value=" SZFMT "\n", which, (*info->itimer)[which].app.interval, (*info->itimer)[which].app.value); itimer_new_settings(dcontext, which, true /*app*/); if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); } } void handle_post_setitimer(dcontext_t *dcontext, bool success, int which, const struct itimerval *new_timer, struct itimerval *prev_timer) { if (new_timer == NULL || which < 0 || which >= NUM_ITIMERS) { ASSERT(new_timer == NULL || !success); return; } thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); ASSERT(which >= 0 && which < NUM_ITIMERS); if (!success && new_timer != NULL) { if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); /* restore saved pre-syscall settings */ (*info->itimer)[which].app = (*info->itimer)[which].app_saved; itimer_new_settings(dcontext, which, true /*app*/); if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); } if (success && prev_timer != NULL) handle_post_getitimer(dcontext, success, which, prev_timer); } void handle_post_getitimer(dcontext_t *dcontext, 
bool success, int which, struct itimerval *cur_timer) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; ASSERT(info != NULL && info->itimer != NULL); if (success) { /* write succeeded for kernel but we're user and can have races */ struct timeval val; DEBUG_DECLARE(bool ok;) ASSERT(which >= 0 && which < NUM_ITIMERS); ASSERT(cur_timer != NULL); if (info->shared_itimer) acquire_recursive_lock(&(*info->itimer)[which].lock); usec_to_timeval((*info->itimer)[which].app.interval, &val); IF_DEBUG(ok =) safe_write_ex(&cur_timer->it_interval, sizeof(val), &val, NULL); ASSERT(ok); if (safe_read(&cur_timer->it_value, sizeof(val), &val)) { /* subtract the difference between last-asked-for value * and current value to reflect elapsed time */ uint64 left = (*info->itimer)[which].app.value - ((*info->itimer)[which].actual.value - timeval_to_usec(&val)); usec_to_timeval(left, &val); IF_DEBUG(ok =) safe_write_ex(&cur_timer->it_value, sizeof(val), &val, NULL); ASSERT(ok); } else ASSERT_NOT_REACHED(); if (info->shared_itimer) release_recursive_lock(&(*info->itimer)[which].lock); } } /* handle app alarm syscall */ /* alarm uses the same itimer and could be defined in terms of setitimer */ void handle_pre_alarm(dcontext_t *dcontext, unsigned int sec) { struct itimerval val; val.it_interval.tv_usec = 0; val.it_interval.tv_sec = 0; val.it_value.tv_usec = 0; val.it_value.tv_sec = sec; handle_pre_setitimer(dcontext, ITIMER_REAL, &val, NULL); } void handle_post_alarm(dcontext_t *dcontext, bool success, unsigned int sec) { /* alarm is always successful, so do nothing in post */ ASSERT(success); return; } /*************************************************************************** * Internal DR communication */ typedef struct _sig_detach_info_t { KSYNCH_TYPE *detached; byte *sigframe_xsp; #ifdef HAVE_SIGALTSTACK stack_t *app_sigstack; #endif } sig_detach_info_t; /* xsp is only set for X86 */ static void notify_and_jmp_without_stack(KSYNCH_TYPE *notify_var, byte *continuation, byte *xsp) { if (ksynch_kernel_support()) { /* Can't use dstack once we signal so in asm we do: * futex/semaphore = 1; * %xsp = xsp; * dynamorio_condvar_wake_and_jmp(notify_var, continuation); */ #ifdef MACOS ASSERT(sizeof(notify_var->sem) == 4); #endif #ifdef X86 # ifndef MACOS /* i#2632: recent clang for 32-bit annoyingly won't do the right thing for * "jmp dynamorio_condvar_wake_and_jmp" and leaves relocs so we ensure it's PIC. * We do this first as it may end up clobbering a scratch reg like xax. */ void (*asm_jmp_tgt)() = dynamorio_condvar_wake_and_jmp; asm("mov %0, %%" ASM_XDX : : "m"(asm_jmp_tgt)); # endif asm("mov %0, %%" ASM_XAX : : "m"(notify_var)); asm("mov %0, %%" ASM_XCX : : "m"(continuation)); asm("mov %0, %%" ASM_XSP : : "m"(xsp)); # ifdef MACOS asm("movl $1,4(%" ASM_XAX ")"); asm("jmp _dynamorio_condvar_wake_and_jmp"); # else asm("movl $1,(%" ASM_XAX ")"); asm("jmp *%" ASM_XDX); # endif #elif defined(AARCHXX) asm("ldr " ASM_R0 ", %0" : : "m"(notify_var)); asm("mov " ASM_R1 ", #1"); asm("str " ASM_R1 ",[" ASM_R0 "]"); asm("ldr " ASM_R1 ", %0" : : "m"(continuation)); asm("b dynamorio_condvar_wake_and_jmp"); #endif } else { ksynch_set_value(notify_var, 1); #ifdef X86 asm("mov %0, %%" ASM_XSP : : "m"(xsp)); asm("mov %0, %%" ASM_XAX : : "m"(continuation)); asm("jmp *%" ASM_XAX); #elif defined(AARCHXX) asm("ldr " ASM_R0 ", %0" : : "m"(continuation)); asm(ASM_INDJMP " " ASM_R0); #endif /* X86/ARM */ } } /* Go native from detach. This is executed on the app stack. 
*/ static void sig_detach_go_native(sig_detach_info_t *info) { byte *xsp = info->sigframe_xsp; #ifdef HAVE_SIGALTSTACK /* Restore the app signal stack, though sigreturn will overwrite this with the * uc_stack in the frame's ucontext anyway (which we already set for the app). */ DEBUG_DECLARE(int rc =) sigaltstack_syscall(info->app_sigstack, NULL); ASSERT(rc == 0); #endif #ifdef X86 /* Skip pretcode */ xsp += sizeof(char *); #endif notify_and_jmp_without_stack(info->detached, (byte *)dynamorio_sigreturn, xsp); ASSERT_NOT_REACHED(); } /* Sets this (slave) thread to detach by directly returning from the signal. */ static void sig_detach(dcontext_t *dcontext, sigframe_rt_t *frame, KSYNCH_TYPE *detached) { thread_sig_info_t *info = (thread_sig_info_t *)dcontext->signal_field; byte *xsp; sig_detach_info_t detach_info; LOG(THREAD, LOG_ASYNCH, 1, "%s: detaching\n", __FUNCTION__); /* Update the mask of the signal frame so that the later sigreturn will * restore the app signal mask. */ memcpy(&frame->uc.uc_sigmask, &info->app_sigblocked, sizeof(info->app_sigblocked)); /* Copy the signal frame to the app stack. * XXX: We live with the transparency risk of storing the signal frame on * the app stack: we assume the app stack is writable where we need it to be, * and that we're not clobbering any app data beyond TOS. */ xsp = get_sigstack_frame_ptr(dcontext, SUSPEND_SIGNAL, frame); copy_frame_to_stack(dcontext, SUSPEND_SIGNAL, frame, xsp, false /*!pending*/); #ifdef HAVE_SIGALTSTACK /* Make sure the frame's sigstack reflects the app stack. * copy_frame_to_stack() should have done this for us. */ ASSERT(((sigframe_rt_t *)xsp)->uc.uc_stack.ss_sp == info->app_sigstack.ss_sp); #endif /* Restore app segment registers. */ os_thread_not_under_dynamo(dcontext); os_tls_thread_exit(dcontext->local_state); #ifdef HAVE_SIGALTSTACK /* We can't restore the app's sigstack here as that will invalidate the * sigstack we're currently on. */ detach_info.app_sigstack = &info->app_sigstack; #endif detach_info.detached = detached; detach_info.sigframe_xsp = xsp; call_switch_stack(&detach_info, xsp, (void (*)(void *))sig_detach_go_native, false /*free_initstack*/, false /*do not return*/); ASSERT_NOT_REACHED(); } /* Returns whether to pass on to app */ static bool handle_suspend_signal(dcontext_t *dcontext, kernel_ucontext_t *ucxt, sigframe_rt_t *frame) { os_thread_data_t *ostd = (os_thread_data_t *)dcontext->os_field; kernel_sigset_t prevmask; sig_full_cxt_t sc_full; ASSERT(ostd != NULL); if (ostd->terminate) { /* PR 297902: exit this thread, without using the dstack */ /* For MacOS, we need a stack as 32-bit syscalls take args on the stack. * We go ahead and use it for x86 too for simpler sysenter return. * We don't have a lot of options: we're terminating, so we go ahead * and use the app stack. 
*/ byte *app_xsp; if (IS_CLIENT_THREAD(dcontext)) app_xsp = (byte *)SIGCXT_FROM_UCXT(ucxt)->SC_XSP; else app_xsp = (byte *)get_mcontext(dcontext)->xsp; LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: exiting\n"); ASSERT(app_xsp != NULL); notify_and_jmp_without_stack(&ostd->terminated, (byte *)dynamorio_sys_exit, app_xsp); ASSERT_NOT_REACHED(); return false; } if (!doing_detach && is_thread_currently_native(dcontext->thread_record) && !IS_CLIENT_THREAD(dcontext) IF_APP_EXPORTS(&&!dr_api_exit)) { if (!sig_take_over(ucxt)) return false; ASSERT_NOT_REACHED(); /* else, shouldn't return */ } /* If suspend_count is 0, we are not trying to suspend this thread * (os_thread_resume() may have already decremented suspend_count to 0, but * os_thread_suspend() will not send a signal until this thread unsets * ostd->suspended, so not having a lock around the suspend_count read is * ok), so pass signal to app. * If we are trying or have already suspended this thread, our own * os_thread_suspend() will not send a 2nd suspend signal until we are * completely resumed, so we can distinguish app uses of SUSPEND_SIGNAL. We * can't have a race between the read and write of suspended_sigcxt b/c * signals are blocked. It's fine to have a race and reorder the app's * signal w/ DR's. */ if (ostd->suspend_count == 0) return true; /* pass to app */ ASSERT(ostd->suspended_sigcxt == NULL); /* XXX: we're not setting DR_WHERE_SIGNAL_HANDLER in enough places. * It's trickier than other whereamis b/c we want to resume the * prior whereami when we return from the handler, but there are * complex control paths that do not always return. * We try to at least do it for the ksynch_wait here. */ dr_where_am_i_t prior_whereami = dcontext->whereami; dcontext->whereami = DR_WHERE_SIGNAL_HANDLER; sig_full_initialize(&sc_full, ucxt); ostd->suspended_sigcxt = &sc_full; LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: suspended now\n"); /* We cannot use mutexes here as we have interrupted DR at an * arbitrary point! Thus we can't use the event_t routines. * However, the existing synch and check above prevent any * re-entrance here, and our cond vars target just a single thread, * so we can get away w/o a mutex. */ /* Notify os_thread_suspend that it can now return, as this thread is * officially suspended now and is ready for thread_{get,set}_mcontext. */ ASSERT(ksynch_get_value(&ostd->suspended) == 0); ksynch_set_value(&ostd->suspended, 1); ksynch_wake_all(&ostd->suspended); /* We're sitting on our sigaltstack w/ all signals blocked. We're * going to stay here but unblock all signals so we don't lose any * delivered while we're waiting. We're at a safe enough point (now * that we've set ostd->suspended: i#5779) to re-enter * master_signal_handler(). We use a mutex in thread_{suspend,resume} to * prevent our own re-suspension signal from arriving before we've * re-blocked on the resume. */ sigprocmask_syscall(SIG_SETMASK, SIGMASK_FROM_UCXT(ucxt), &prevmask, sizeof(ucxt->uc_sigmask)); /* i#96/PR 295561: use futex(2) if available */ while (ksynch_get_value(&ostd->wakeup) == 0) { /* Waits only if the wakeup flag is not set as 1. Return value * doesn't matter because the flag will be re-checked. */ ksynch_wait(&ostd->wakeup, 0, 0); if (ksynch_get_value(&ostd->wakeup) == 0) { /* If it still has to wait, give up the cpu. 
*/ os_thread_yield(); } } LOG(THREAD, LOG_ASYNCH, 2, "handle_suspend_signal: awake now\n"); /* re-block so our exit from master_signal_handler is not interrupted */ sigprocmask_syscall(SIG_SETMASK, &prevmask, NULL, sizeof(prevmask)); ostd->suspended_sigcxt = NULL; /* Notify os_thread_resume that it can return now, which (assuming * suspend_count is back to 0) means it's then safe to re-suspend. */ ksynch_set_value(&ostd->suspended, 0); /*reset prior to signalling os_thread_resume*/ ksynch_set_value(&ostd->resumed, 1); ksynch_wake_all(&ostd->resumed); dcontext->whereami = prior_whereami; if (ostd->retakeover) { ostd->retakeover = false; sig_take_over(ucxt); /* shouldn't return for this case */ ASSERT_NOT_REACHED(); } else if (ostd->do_detach) { ostd->do_detach = false; sig_detach(dcontext, frame, &ostd->detached); /* no return */ ASSERT_NOT_REACHED(); } return false; /* do not pass to app */ } /* PR 206278: for try/except we need to save the signal mask */ void dr_setjmp_sigmask(dr_jmp_buf_t *buf) { /* i#226/PR 492568: we rely on the kernel storing the prior mask in the * signal frame, so we do not need to store it on every setjmp, which * can be a performance hit. */ #ifdef DEBUG sigprocmask_syscall(SIG_SETMASK, NULL, &buf->sigmask, sizeof(buf->sigmask)); #endif } /* i#61/PR 211530: nudge on Linux. * Determines whether this is a nudge signal, and if so queues up a nudge, * or is an app signal. Returns whether to pass the signal on to the app. */ static bool handle_nudge_signal(dcontext_t *dcontext, kernel_siginfo_t *siginfo, kernel_ucontext_t *ucxt) { sigcontext_t *sc = SIGCXT_FROM_UCXT(ucxt); nudge_arg_t *arg = (nudge_arg_t *)siginfo; instr_t instr; char buf[MAX_INSTR_LENGTH]; /* Distinguish a nudge from an app signal. An app using libc sigqueue() * will never have its signal mistaken as libc does not expose the kernel_siginfo_t * and always passes 0 for si_errno, so we're only worried beyond our * si_code check about an app using a raw syscall that is deliberately * trying to fool us. * While there is a lot of padding space in kernel_siginfo_t, the kernel doesn't * copy it through on SYS_rt_sigqueueinfo so we don't have room for any * dedicated magic numbers. The client id could function as a magic * number for client nudges, but I don't think we want to kill the app * if an external nudger types the client id wrong. */ LOG(THREAD, LOG_ASYNCH, 2, "%s: sig=%d code=%d errno=%d\n", __FUNCTION__, siginfo->si_signo, siginfo->si_code, siginfo->si_errno); if (siginfo->si_signo != NUDGESIG_SIGNUM /* PR 477454: remove the IF_NOT_VMX86 once we have nudge-arg support */ IF_NOT_VMX86(|| siginfo->si_code != SI_QUEUE || siginfo->si_errno == 0)) { return true; /* pass to app */ } #if defined(CLIENT_INTERFACE) && !defined(VMX86_SERVER) DODEBUG({ if (TEST(NUDGE_GENERIC(client), arg->nudge_action_mask) && !is_valid_client_id(arg->client_id)) { SYSLOG_INTERNAL_WARNING("received client nudge for invalid id=0x%x", arg->client_id); } }); #endif if (dynamo_exited || !dynamo_initialized || dcontext == NULL) { /* Ignore the nudge: too early, or too late. * Xref Windows handling of such cases in nudge.c: old case 5702, etc. * We do this before the illegal-instr check b/c it's unsafe to decode * if too early or too late. */ SYSLOG_INTERNAL_WARNING("too-early or too-late nudge: ignoring"); return false; /* do not pass to app */ } /* As a further check, try to detect whether this was raised synchronously * from a real illegal instr: though si_code for that should not be * SI_QUEUE. 
It's possible a nudge happened to come at a bad instr before * it faulted, or maybe the instr after a syscall or other wait spot is * illegal, but we'll live with that risk. */ ASSERT(NUDGESIG_SIGNUM == SIGILL); /* else this check makes no sense */ instr_init(dcontext, &instr); if (safe_read((byte *)sc->SC_XIP, sizeof(buf), buf) && (decode(dcontext, (byte *)buf, &instr) == NULL || /* check for ud2 (xref PR 523161) */ instr_is_undefined(&instr))) { LOG(THREAD, LOG_ASYNCH, 2, "%s: real illegal instr @" PFX "\n", __FUNCTION__, sc->SC_XIP); DOLOG(2, LOG_ASYNCH, { disassemble_with_bytes(dcontext, (byte *)sc->SC_XIP, THREAD); }); instr_free(dcontext, &instr); return true; /* pass to app */ } instr_free(dcontext, &instr); #ifdef VMX86_SERVER /* Treat as a client nudge until we have PR 477454 */ if (siginfo->si_errno == 0) { arg->version = NUDGE_ARG_CURRENT_VERSION; arg->flags = 0; arg->nudge_action_mask = NUDGE_GENERIC(client); arg->client_id = 0; arg->client_arg = 0; } #endif LOG(THREAD, LOG_ASYNCH, 1, "received nudge version=%u flags=0x%x mask=0x%x id=0x%08x " "arg=0x" ZHEX64_FORMAT_STRING "\n", arg->version, arg->flags, arg->nudge_action_mask, arg->client_id, arg->client_arg); SYSLOG_INTERNAL_INFO("received nudge mask=0x%x id=0x%08x arg=0x" ZHEX64_FORMAT_STRING, arg->nudge_action_mask, arg->client_id, arg->client_arg); /* We need to handle the nudge at a safe, nolinking spot */ if (safe_is_in_fcache(dcontext, (byte *)sc->SC_XIP, (byte *)sc->SC_XSP) && dcontext->interrupted_for_nudge == NULL) { /* We unlink the interrupted fragment and skip any inlined syscalls to * bound the nudge delivery time. If we already unlinked one we assume * that's sufficient. */ fragment_t wrapper; fragment_t *f = fragment_pclookup(dcontext, (byte *)sc->SC_XIP, &wrapper); if (f != NULL) { if (unlink_fragment_for_signal(dcontext, f, (byte *)sc->SC_XIP)) dcontext->interrupted_for_nudge = f; } } /* No lock is needed since thread-private and this signal is blocked now */ nudge_add_pending(dcontext, arg); return false; /* do not pass to app */ }
1
15464
These need to be in the other order to avoid crashing when dcontext == GLOBAL_DCONTEXT (==-1); see the short-circuit sketch after this record.
DynamoRIO-dynamorio
c
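The review comment on the DynamoRIO record above hinges on C's short-circuit evaluation: if dcontext can hold the sentinel GLOBAL_DCONTEXT (==-1 per the comment, i.e. not a dereferenceable pointer), the sentinel comparison must come first in the conjunction so the member access is skipped. Below is a minimal standalone sketch of that pattern, assuming only what the comment states; the struct layout and the int field here are illustrative stand-ins, not the real DynamoRIO definitions.

#include <stdio.h>

/* Illustrative stand-ins; the real dcontext_t and sentinel live in DynamoRIO. */
typedef struct {
    int signal_field;
} dcontext_t;
#define GLOBAL_DCONTEXT ((dcontext_t *)-1)

/* Unsafe order: dc->signal_field is evaluated first, so passing the
 * sentinel dereferences address -1 and crashes. */
static int checks_in_wrong_order(dcontext_t *dc)
{
    return dc->signal_field != 0 && dc != GLOBAL_DCONTEXT;
}

/* Safe order: && short-circuits, so the dereference never runs for the
 * sentinel value. */
static int checks_in_right_order(dcontext_t *dc)
{
    return dc != GLOBAL_DCONTEXT && dc->signal_field != 0;
}

int main(void)
{
    dcontext_t dc = { 42 };
    printf("%d\n", checks_in_right_order(&dc));              /* prints 1 */
    printf("%d\n", checks_in_right_order(GLOBAL_DCONTEXT));  /* prints 0, no crash */
    return 0;
}

The same reasoning applies to any pointer that doubles as a flag: test the sentinel before any member access in the same condition.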
@@ -24,7 +24,8 @@ namespace Http2SampleApp
                     factory.SetMinimumLevel(LogLevel.Trace);
                     factory.AddConsole();
                 })
-                .UseKestrel((context, options) =>
+                .UseKestrel()
+                .ConfigureKestrel((context, options) =>
                 {
                     var basePort = context.Configuration.GetValue<int?>("BASE_PORT") ?? 5000;
 
1
using System;
using System.IO;
using System.Net;
using System.Security.Authentication;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Connections.Features;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Server.Kestrel.Core;
using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;

namespace Http2SampleApp
{
    public class Program
    {
        public static void Main(string[] args)
        {
            var hostBuilder = new WebHostBuilder()
                .ConfigureLogging((_, factory) =>
                {
                    // Set logging to the MAX.
                    factory.SetMinimumLevel(LogLevel.Trace);
                    factory.AddConsole();
                })
                .UseKestrel((context, options) =>
                {
                    var basePort = context.Configuration.GetValue<int?>("BASE_PORT") ?? 5000;

                    // Run callbacks on the transport thread
                    options.ApplicationSchedulingMode = SchedulingMode.Inline;

                    // Http/1.1 endpoint for comparison
                    options.Listen(IPAddress.Any, basePort, listenOptions =>
                    {
                        listenOptions.Protocols = HttpProtocols.Http1;
                        listenOptions.UseConnectionLogging();
                    });

                    // TLS Http/1.1 or HTTP/2 endpoint negotiated via ALPN
                    options.Listen(IPAddress.Any, basePort + 1, listenOptions =>
                    {
                        listenOptions.Protocols = HttpProtocols.Http1AndHttp2;
                        listenOptions.UseHttps("testCert.pfx", "testPassword");
                        listenOptions.UseConnectionLogging();
                        listenOptions.ConnectionAdapters.Add(new TlsFilterAdapter());
                    });

                    // Prior knowledge, no TLS handshake. WARNING: Not supported by browsers
                    // but useful for the h2spec tests
                    options.Listen(IPAddress.Any, basePort + 5, listenOptions =>
                    {
                        listenOptions.Protocols = HttpProtocols.Http2;
                        listenOptions.UseConnectionLogging();
                    });
                })
                .UseContentRoot(Directory.GetCurrentDirectory())
                .UseStartup<Startup>();

            hostBuilder.Build().Run();
        }

        // https://tools.ietf.org/html/rfc7540#appendix-A
        // Allows filtering TLS handshakes on a per connection basis
        private class TlsFilterAdapter : IConnectionAdapter
        {
            public bool IsHttps => false;

            public Task<IAdaptedConnection> OnConnectionAsync(ConnectionAdapterContext context)
            {
                var tlsFeature = context.Features.Get<ITlsHandshakeFeature>();

                if (tlsFeature.CipherAlgorithm == CipherAlgorithmType.Null)
                {
                    throw new NotSupportedException("Prohibited cipher: " + tlsFeature.CipherAlgorithm);
                }

                return Task.FromResult<IAdaptedConnection>(new AdaptedConnection(context.ConnectionStream));
            }

            private class AdaptedConnection : IAdaptedConnection
            {
                public AdaptedConnection(Stream adaptedStream)
                {
                    ConnectionStream = adaptedStream;
                }

                public Stream ConnectionStream { get; }

                public void Dispose()
                {
                }
            }
        }
    }
}
1
16140
Why not change the other samples?
aspnet-KestrelHttpServer
.cs
@@ -10,8 +10,9 @@ table.isHeader = function (cell) {
 		return true;
 	}
 
-	if (cell.id) {
-		return !!document.querySelector('[headers~="' + axe.utils.escapeSelector(cell.id) + '"]');
+	if (cell.getAttribute('id')) {
+		const id = axe.utils.escapeSelector(cell.getAttribute('id'));
+		return !!document.querySelector(`[headers~="${id}"]`);
 	}
 
 	return false;
1
/*global table, axe */

/**
 * Determine if a `HTMLTableCellElement` is a header
 * @param  {HTMLTableCellElement} node The table cell to test
 * @return {Boolean}
 */
table.isHeader = function (cell) {
	if (table.isColumnHeader(cell) || table.isRowHeader(cell)) {
		return true;
	}

	if (cell.id) {
		return !!document.querySelector('[headers~="' + axe.utils.escapeSelector(cell.id) + '"]');
	}

	return false;
};
1
11199
Indentation is mixed up here due to spaces/tabs, I'm guessing.
dequelabs-axe-core
js
@@ -738,9 +738,15 @@ static fpga_result poll_interrupt(fpga_dma_handle dma_h) {
 		res = FPGA_EXCEPTION;
 	} else {
 		uint64_t count = 0;
-		read(pfd.fd, &count, sizeof(count));
-		debug_print("Poll success. Return = %d, count = %d\n",poll_res, (int)count);
-		res = FPGA_OK;
+		ssize_t bytes_read = read(pfd.fd, &count, sizeof(count));
+		if(bytes_read <= 0) {
+			fprintf( stderr, "Error: %s\n",
+				bytes_read < 0 ? strerror(errno) : "zero bytes read");
+			res = FPGA_EXCEPTION;
+		} else {
+			debug_print("Poll success. Return = %d, count = %d\n",poll_res, (int)count);
+			res = FPGA_OK;
+		}
 	}
 
 out:
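The patch above adds a check on the return value of read(2) before trusting the interrupt counter. As a standalone illustration of the same poll-then-read pattern (a hedged sketch under generic POSIX assumptions, not the OPAE driver code; the function and variable names are hypothetical):

#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Wait for the descriptor to become readable, then drain the 8-byte
 * event counter. Returns 0 on success, -1 on poll/read failure. */
static int wait_and_drain(int fd, int timeout_ms, uint64_t *count_out)
{
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    int poll_res = poll(&pfd, 1, timeout_ms);
    if (poll_res <= 0) {
        fprintf(stderr, "poll: %s\n",
                poll_res < 0 ? strerror(errno) : "timed out");
        return -1;
    }
    uint64_t count = 0;
    ssize_t bytes_read = read(fd, &count, sizeof(count));
    if (bytes_read <= 0) {
        /* EOF (0) or error (-1): count was never written, so report
         * failure instead of logging a bogus success. */
        fprintf(stderr, "read: %s\n",
                bytes_read < 0 ? strerror(errno) : "zero bytes read");
        return -1;
    }
    *count_out = count;
    return 0;
}

int main(void)
{
    int fds[2];
    if (pipe(fds) != 0)
        return 1;
    uint64_t one = 1;
    (void)write(fds[1], &one, sizeof(one)); /* simulate the interrupt event */
    uint64_t count = 0;
    if (wait_and_drain(fds[0], 1000, &count) == 0)
        printf("count = %llu\n", (unsigned long long)count);
    return 0;
}

Checking bytes_read rather than assuming a full 8-byte read is what turns the old code's silent failure into the reportable FPGA_EXCEPTION the patch introduces.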
1
// Copyright(c) 2017, Intel Corporation // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Intel Corporation nor the names of its contributors // may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMEdesc. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. /** * \fpga_dma.c * \brief FPGA DMA User-mode driver */ #include <stdlib.h> #include <stdbool.h> #include <string.h> #include <opae/fpga.h> #include <stddef.h> #include <poll.h> #include <errno.h> #include <unistd.h> #include <assert.h> #include <safe_string/safe_string.h> #include "fpga_dma_internal.h" #include "fpga_dma.h" static int err_cnt = 0; /* * macro for checking return codes */ #define ON_ERR_GOTO(res, label, desc)\ do {\ if ((res) != FPGA_OK) {\ err_cnt++;\ fprintf(stderr, "Error %s: %s\n", (desc), fpgaErrStr(res));\ goto label;\ }\ } while (0) // Internal Functions // End of feature list static bool _fpga_dma_feature_eol(uint64_t dfh) { return ((dfh >> AFU_DFH_EOL_OFFSET) & 1) == 1; } // Feature type is BBB static bool _fpga_dma_feature_is_bbb(uint64_t dfh) { // BBB is type 2 return ((dfh >> AFU_DFH_TYPE_OFFSET) & 0xf) == FPGA_DMA_BBB; } // Offset to the next feature header static uint64_t _fpga_dma_feature_next(uint64_t dfh) { return (dfh >> AFU_DFH_NEXT_OFFSET) & 0xffffff; } // copy bytes to MMIO static fpga_result _copy_to_mmio(fpga_handle afc_handle, uint64_t mmio_dst, uint64_t *host_src, int len) { int i=0; fpga_result res = FPGA_OK; //mmio requires 8 byte alignment if(len % QWORD_BYTES != 0) return FPGA_INVALID_PARAM; if(mmio_dst % QWORD_BYTES != 0) return FPGA_INVALID_PARAM; uint64_t dev_addr = mmio_dst; uint64_t *host_addr = host_src; for(i = 0; i < len/QWORD_BYTES; i++) { res = fpgaWriteMMIO64(afc_handle, 0, dev_addr, *host_addr); if(res != FPGA_OK) return res; host_addr += 1; dev_addr += QWORD_BYTES; } return FPGA_OK; } static fpga_result _send_descriptor(fpga_dma_handle dma_h, msgdma_ext_desc_t desc) { fpga_result res = FPGA_OK; msgdma_status_t status = {0}; debug_print("desc.rd_address = %x\n",desc.rd_address); debug_print("desc.wr_address = %x\n",desc.wr_address); debug_print("desc.len = %x\n",desc.len); debug_print("desc.wr_burst_count = %x\n",desc.wr_burst_count); debug_print("desc.rd_burst_count = %x\n",desc.rd_burst_count); debug_print("desc.wr_stride 
%x\n",desc.wr_stride); debug_print("desc.rd_stride %x\n",desc.rd_stride); debug_print("desc.rd_address_ext %x\n",desc.rd_address_ext); debug_print("desc.wr_address_ext %x\n",desc.wr_address_ext); debug_print("SGDMA_CSR_BASE = %lx SGDMA_DESC_BASE=%lx\n",dma_h->dma_csr_base, dma_h->dma_desc_base); do { res = fpgaReadMMIO32(dma_h->fpga_h, dma_h->mmio_num, dma_h->dma_csr_base+offsetof(msgdma_csr_t, status), &status.reg); ON_ERR_GOTO(res, out, "fpgaReadMMIO64"); } while(status.st.desc_buf_full); res = _copy_to_mmio(dma_h->fpga_h, dma_h->dma_desc_base, (uint64_t *)&desc, sizeof(desc)); ON_ERR_GOTO(res, out, "_copy_to_mmio"); out: return res; } static fpga_result _do_dma(fpga_dma_handle dma_h, uint64_t dst, uint64_t src, int count, int is_last_desc, fpga_dma_transfer_t type, bool intr_en) { msgdma_ext_desc_t desc = {0}; fpga_result res = FPGA_OK; int alignment_offset = 0; int segment_size = 0; // src, dst and count must be 64-byte aligned if(dst%FPGA_DMA_ALIGN_BYTES !=0 || src%FPGA_DMA_ALIGN_BYTES !=0 || count%FPGA_DMA_ALIGN_BYTES!=0) { return FPGA_INVALID_PARAM; } // these fields are fixed for all DMA transfers desc.seq_num = 0; desc.wr_stride = 1; desc.rd_stride = 1; desc.control.go = 1; if(intr_en) desc.control.transfer_irq_en = 1; else desc.control.transfer_irq_en = 0; // Enable "earlyreaddone" in the control field of the descriptor except the last. // Setting early done causes the read logic to move to the next descriptor // before the previous descriptor completes. // This elminates a few hundred clock cycles of waiting between transfers. if(!is_last_desc) desc.control.early_done_en = 1; else desc.control.early_done_en = 0; if (type == FPGA_TO_FPGA_MM) { desc.rd_address = src & FPGA_DMA_MASK_32_BIT; desc.wr_address = dst & FPGA_DMA_MASK_32_BIT; desc.len = count; desc.wr_burst_count = 4; desc.rd_burst_count = 4; desc.rd_address_ext = (src >> 32) & FPGA_DMA_MASK_32_BIT; desc.wr_address_ext = (dst >> 32) & FPGA_DMA_MASK_32_BIT; res = _send_descriptor(dma_h, desc); ON_ERR_GOTO(res, out, "_send_descriptor"); } // either FPGA to Host or Host to FPGA transfer so we need to make sure the DMA transaction is aligned to the burst size (CCIP restriction) else { // need to determine if the CCIP (host) address is aligned to 4CL (256B). When 0 the CCIP address is aligned. alignment_offset = (type == HOST_TO_FPGA_MM)? 
(src % (4 * FPGA_DMA_ALIGN_BYTES)) : (dst % (4 * FPGA_DMA_ALIGN_BYTES)); // not aligned to 4CL so performing a short transfer to get aligned if (alignment_offset != 0) { desc.rd_address = src & FPGA_DMA_MASK_32_BIT; desc.wr_address = dst & FPGA_DMA_MASK_32_BIT; desc.wr_burst_count = 1; desc.rd_burst_count = 1; desc.rd_address_ext = (src >> 32) & FPGA_DMA_MASK_32_BIT; desc.wr_address_ext = (dst >> 32) & FPGA_DMA_MASK_32_BIT; // count isn't large enough to hit next 4CL boundary if (((4 * FPGA_DMA_ALIGN_BYTES) - alignment_offset) >= count) { segment_size = count; count = 0; // only had to transfer count amount of data to reach the end of the provided buffer } else { segment_size = (4 * FPGA_DMA_ALIGN_BYTES) - alignment_offset; src += segment_size; dst += segment_size; count -= segment_size; // subtract the segment size from count since the transfer below will bring us into 4CL alignment desc.control.transfer_irq_en = 0; } // will post short transfer to align to a 4CL (256 byte) boundary desc.len = segment_size; res = _send_descriptor(dma_h, desc); ON_ERR_GOTO(res, out, "_send_descriptor"); } // at this point we are 4CL (256 byte) aligned // if there is at least 4CL (256 bytes) of data to transfer, post bursts of 4 if (count >= (4 * FPGA_DMA_ALIGN_BYTES)) { desc.rd_address = src & FPGA_DMA_MASK_32_BIT; desc.wr_address = dst & FPGA_DMA_MASK_32_BIT; desc.wr_burst_count = 4; desc.rd_burst_count = 4; desc.rd_address_ext = (src >> 32) & FPGA_DMA_MASK_32_BIT; desc.wr_address_ext = (dst >> 32) & FPGA_DMA_MASK_32_BIT; // buffer ends on 4CL boundary if ((count % (4 * FPGA_DMA_ALIGN_BYTES)) == 0) { segment_size = count; count = 0; // transfer below will move the remainder of the buffer } // buffers do not end on 4CL boundary so transfer only up to the last 4CL boundary leaving a segment at the end to finish later else { segment_size = count - (count % (4 * FPGA_DMA_ALIGN_BYTES)); // round count down to the nearest multiple of 4CL src += segment_size; dst += segment_size; count -= segment_size; desc.control.transfer_irq_en = 0; } desc.len = segment_size; res = _send_descriptor(dma_h, desc); ON_ERR_GOTO(res, out, "_send_descriptor"); } // at this point we have posted all the bursts of length 4 we can but there might be 64, 128, or 192 bytes of data to transfer still // if buffer did not end on 4CL (256 byte) boundary post short transfer to handle the remainder if (count > 0) { desc.rd_address = src & FPGA_DMA_MASK_32_BIT; desc.wr_address = dst & FPGA_DMA_MASK_32_BIT; desc.len = count; desc.wr_burst_count = 1; desc.rd_burst_count = 1; desc.rd_address_ext = (src >> 32) & FPGA_DMA_MASK_32_BIT; desc.wr_address_ext = (dst >> 32) & FPGA_DMA_MASK_32_BIT; if(intr_en) desc.control.transfer_irq_en = 1; // will post short transfer to move the remainder of the buffer res = _send_descriptor(dma_h, desc); ON_ERR_GOTO(res, out, "_send_descriptor"); } } // end of FPGA --> Host or Host --> FPGA transfer out: return res; } // Public APIs fpga_result fpgaDmaOpen(fpga_handle fpga, fpga_dma_handle *dma_p) { fpga_result res = FPGA_OK; fpga_dma_handle dma_h = NULL; int i = 0; if(!fpga) { return FPGA_INVALID_PARAM; } if(!dma_p) { return FPGA_INVALID_PARAM; } // init the dma handle dma_h = (fpga_dma_handle)malloc(sizeof(struct _dma_handle_t)); if(!dma_h) { return FPGA_NO_MEMORY; } dma_h->fpga_h = fpga; for(i=0; i < FPGA_DMA_MAX_BUF; i++) dma_h->dma_buf_ptr[i] = NULL; dma_h->mmio_num = 0; dma_h->mmio_offset = 0; // Discover DMA BBB by traversing the device feature list bool end_of_list = false; bool dma_found = false; uint64_t dfh 
= 0; uint64_t offset = dma_h->mmio_offset; do { // Read the next feature header res = fpgaReadMMIO64(dma_h->fpga_h, dma_h->mmio_num, offset, &dfh); ON_ERR_GOTO(res, out, "fpgaReadMMIO64"); // Read the current feature's UUID uint64_t feature_uuid_lo, feature_uuid_hi; res = fpgaReadMMIO64(dma_h->fpga_h, dma_h->mmio_num, offset + 8, &feature_uuid_lo); ON_ERR_GOTO(res, out, "fpgaReadMMIO64"); res = fpgaReadMMIO64(dma_h->fpga_h, dma_h->mmio_num, offset + 16, &feature_uuid_hi); ON_ERR_GOTO(res, out, "fpgaReadMMIO64"); if (_fpga_dma_feature_is_bbb(dfh) && (feature_uuid_lo == FPGA_DMA_UUID_L) && (feature_uuid_hi == FPGA_DMA_UUID_H) ) { // Found one. Record it. dma_h->dma_base = offset; dma_h->dma_csr_base = dma_h->dma_base+FPGA_DMA_CSR; dma_h->dma_desc_base = dma_h->dma_base+FPGA_DMA_DESC; dma_h->dma_ase_cntl_base = dma_h->dma_base+FPGA_DMA_ADDR_SPAN_EXT_CNTL; dma_h->dma_ase_data_base = dma_h->dma_base+FPGA_DMA_ADDR_SPAN_EXT_DATA; dma_found = true; break; } // End of the list? end_of_list = _fpga_dma_feature_eol(dfh); // Move to the next feature header offset = offset + _fpga_dma_feature_next(dfh); } while(!end_of_list); if(dma_found) { *dma_p = dma_h; res = FPGA_OK; } else { *dma_p = NULL; res = FPGA_NOT_FOUND; goto out; } // Buffer size must be page aligned for prepareBuffer for(i=0; i< FPGA_DMA_MAX_BUF; i++) { res = fpgaPrepareBuffer(dma_h->fpga_h, FPGA_DMA_BUF_SIZE, (void **)&(dma_h->dma_buf_ptr[i]), &dma_h->dma_buf_wsid[i], 0); ON_ERR_GOTO(res, out, "fpgaPrepareBuffer"); res = fpgaGetIOAddress(dma_h->fpga_h, dma_h->dma_buf_wsid[i], &dma_h->dma_buf_iova[i]); ON_ERR_GOTO(res, rel_buf, "fpgaGetIOAddress"); } // Allocate magic number buffer res = fpgaPrepareBuffer(dma_h->fpga_h, FPGA_DMA_ALIGN_BYTES, (void **)&(dma_h->magic_buf), &dma_h->magic_wsid, 0); ON_ERR_GOTO(res, out, "fpgaPrepareBuffer"); res = fpgaGetIOAddress(dma_h->fpga_h, dma_h->magic_wsid, &dma_h->magic_iova); ON_ERR_GOTO(res, rel_buf, "fpgaGetIOAddress"); memset((void*)dma_h->magic_buf, 0, FPGA_DMA_ALIGN_BYTES); // turn on global interrupts msgdma_ctrl_t ctrl = {0}; ctrl.ct.global_intr_en_mask = 1; res = fpgaWriteMMIO32(dma_h->fpga_h, 0, dma_h->dma_csr_base+offsetof(msgdma_csr_t, ctrl), ctrl.reg); ON_ERR_GOTO(res, rel_buf, "fpgaWriteMMIO32"); // register interrupt event handle res = fpgaCreateEventHandle(&dma_h->eh); ON_ERR_GOTO(res, rel_buf, "fpgaCreateEventHandle"); res = fpgaRegisterEvent(dma_h->fpga_h, FPGA_EVENT_INTERRUPT, dma_h->eh, 0/*vector id*/); ON_ERR_GOTO(res, destroy_eh, "fpgaRegisterEvent"); return FPGA_OK; destroy_eh: res = fpgaDestroyEventHandle(&dma_h->eh); ON_ERR_GOTO(res, rel_buf, "fpgaRegisterEvent"); rel_buf: for(i=0; i< FPGA_DMA_MAX_BUF; i++) { res = fpgaReleaseBuffer(dma_h->fpga_h, dma_h->dma_buf_wsid[i]); ON_ERR_GOTO(res, out, "fpgaReleaseBuffer"); } out: if(!dma_found) free(dma_h); return res; } /** * _read_memory_mmio_unaligned * * @brief Performs a unaligned read(address not 4/8/64 byte aligned) from FPGA address(device address). * @param[in] dma Handle to the FPGA DMA object * @param[in] dev_addr FPGA address * @param[in] host_addr Host buffer address * @param[in] count Size in bytes, always less than 8bytes. 
* @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _read_memory_mmio_unaligned(fpga_dma_handle dma_h, uint64_t dev_addr,uint64_t host_addr, uint64_t count) { fpga_result res = FPGA_OK; uint64_t shift = dev_addr % QWORD_BYTES; debug_print("shift = %08lx , count = %08lx \n",shift, count); uint64_t dev_aligned_addr = dev_addr - shift; //read data from device memory uint64_t read_tmp = 0; res = fpgaReadMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_data_base+(dev_aligned_addr&DMA_ADDR_SPAN_EXT_WINDOW_MASK), &read_tmp); if(res != FPGA_OK) return res; //overlay our data if(count > FPGA_DMA_ALIGN_BYTES) { res = FPGA_NO_MEMORY; ON_ERR_GOTO(res, out, "Illegal transfer size\n"); } memcpy((void *)host_addr, ((char *)(&read_tmp))+shift, count); out: return res; } /** * _write_memory_mmio_unaligned * * @brief Performs a unaligned write(address not 4/8/64 byte aligned) to FPGA address(device address). * @param[in] dma Handle to the FPGA DMA object * @param[in] dev_addr FPGA address * @param[in] host_addr Host buffer address * @param[in] count Size in bytes, always less than 8bytes. * @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _write_memory_mmio_unaligned(fpga_dma_handle dma_h, uint64_t dev_addr,uint64_t host_addr, uint64_t count) { fpga_result res = FPGA_OK; uint64_t shift = dev_addr % QWORD_BYTES; debug_print("shift = %08lx , count = %08lx \n",shift, count); uint64_t dev_aligned_addr = dev_addr - shift; //read data from device memory uint64_t read_tmp = 0; res = fpgaReadMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_data_base+(dev_aligned_addr&DMA_ADDR_SPAN_EXT_WINDOW_MASK), &read_tmp); if(res != FPGA_OK) return res; //overlay our data if(count > FPGA_DMA_ALIGN_BYTES) { res = FPGA_NO_MEMORY; ON_ERR_GOTO(res, out, "Illegal transfer size\n"); } memcpy(((char *)(&read_tmp))+shift, (void *)host_addr, count); //write back to device res = fpgaWriteMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_data_base+(dev_aligned_addr&DMA_ADDR_SPAN_EXT_WINDOW_MASK), read_tmp); if(res != FPGA_OK) return res; out: return res; } /** * _write_memory_mmio * * @brief Writes to a DWORD/QWORD aligned memory address(FPGA address). 
* @param[in] dma Handle to the FPGA DMA object * @param[in/out] dst_ptr FPGA address * @param[in/out] src_ptr Host buffer address * @param[in/out] count Size in bytes * @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _write_memory_mmio(fpga_dma_handle dma_h, uint64_t *dst_ptr,uint64_t *src_ptr, uint64_t* count) { fpga_result res = FPGA_OK; uint64_t src = *src_ptr; uint64_t dst = *dst_ptr; uint64_t align_bytes = *count; uint64_t cur_mem_page = 0; uint64_t offset = 0; uint64_t i = 0; uint64_t alignment=0; if(IS_ALIGNED_QWORD(dst)) alignment = QWORD_BYTES; else if(IS_ALIGNED_DWORD(dst)) alignment = DWORD_BYTES; if(alignment == 0) return FPGA_EXCEPTION; fpgaReadMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base, &cur_mem_page); for(i = 0; i < align_bytes/alignment ; i++) { uint64_t mem_page = dst & ~DMA_ADDR_SPAN_EXT_WINDOW_MASK; if(mem_page != cur_mem_page) { cur_mem_page = mem_page; fpgaWriteMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base, cur_mem_page); } offset = dma_h->dma_ase_data_base+(dst&DMA_ADDR_SPAN_EXT_WINDOW_MASK); if(alignment == QWORD_BYTES) res = fpgaWriteMMIO64(dma_h->fpga_h, 0, offset, *(uint64_t *)src); else if(alignment == DWORD_BYTES) res = fpgaWriteMMIO32(dma_h->fpga_h, 0, offset, *(uint64_t *)src); if(res != FPGA_OK) return res; src += alignment; dst += alignment; } align_bytes -= (align_bytes/alignment)*alignment; *src_ptr = src; *dst_ptr = dst; *count = align_bytes; return res; } /** * _ase_host_to_fpga * * @brief Tx "count" bytes from HOST to FPGA using Address span expander(ASE)- will internally make calls to handle unaligned and aligned MMIO writes. * @param[in] dma Handle to the FPGA DMA object * @param[in/out] dst_ptr FPGA address * @param[in/out] src_ptr Host buffer address * @param[in] count Size in bytes * @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _ase_host_to_fpga(fpga_dma_handle dma_h, uint64_t *dst_ptr,uint64_t *src_ptr, uint64_t count) { fpga_result res = FPGA_OK; uint64_t dst = *dst_ptr; uint64_t src = *src_ptr; uint64_t count_left = count; uint64_t mmio_shift = 0; uint64_t unaligned_size = 0; do { //Set the Address Span expander CTRL port to the required 4K window uint64_t cur_mem_page = dst & ~DMA_ADDR_SPAN_EXT_WINDOW_MASK; res = fpgaWriteMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base , cur_mem_page); if(res != FPGA_OK) return res; //Can use for debug if dst span was set to the right 4K //mmio_read64(dma_h->fpga_h, (dma_h->dma_base)+FPGA_DMA_ADDR_SPAN_EXT_CNTL, &data, "addr_span"); //Aligns address to 8 byte using dst masking method if(!IS_ALIGNED_QWORD(dst) && !IS_ALIGNED_DWORD(dst)) { mmio_shift = dst % QWORD_BYTES; unaligned_size = QWORD_BYTES - mmio_shift; if(unaligned_size > count_left) unaligned_size = count_left; res = _write_memory_mmio_unaligned(dma_h,dst,src,unaligned_size); if(res != FPGA_OK) return res; count_left -= unaligned_size; src += unaligned_size; dst += unaligned_size; } if(count_left) { //Handles 8/4 byte MMIO transfer if(IS_ALIGNED_QWORD(dst)) { res = _write_memory_mmio(dma_h, &dst, &src, &count_left); if(res != FPGA_OK) return res; } if(IS_ALIGNED_DWORD(dst)) { res = _write_memory_mmio(dma_h, &dst, &src, &count_left); if(res != FPGA_OK) return res; }//Left over unaligned count bytes are transfered using dst masking method if(count_left) { mmio_shift = dst % QWORD_BYTES; unaligned_size = QWORD_BYTES - mmio_shift; if(unaligned_size > count_left) unaligned_size = count_left; res = _write_memory_mmio_unaligned(dma_h,dst,src,unaligned_size); if(res != 
FPGA_OK) return res; count_left -= unaligned_size; src += unaligned_size; dst += unaligned_size; } } } while(count_left!=0 ); *dst_ptr = dst; *src_ptr = src; debug_print("dst_ptr = %08lx , count = %08lx, src = %08lx \n", *dst_ptr, count, *src_ptr); if(count_left != 0) { debug_print("%08lx bytes left to transfer, MMIO needs tx len to be 8/4 byte aligned \n", count_left); return FPGA_NOT_SUPPORTED; } return FPGA_OK; } /** * _read_memory_mmio * * @brief Reads a DWORD/QWORD aligned memory address(FPGA address). * @param[in] dma Handle to the FPGA DMA object * @param[in/out] dst_ptr Host Buffer Address * @param[in/out] src_ptr FPGA address * @param[in/out] count Size in bytes * @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _read_memory_mmio(fpga_dma_handle dma_h, uint64_t *src_ptr,uint64_t *dst_ptr, uint64_t* count) { fpga_result res = FPGA_OK; uint64_t src = *src_ptr; uint64_t dst = *dst_ptr; uint64_t align_bytes = *count; uint64_t cur_mem_page = 0; uint64_t offset = 0; uint64_t i = 0; uint64_t alignment = 0; if(IS_ALIGNED_QWORD(src)) alignment = QWORD_BYTES; else if(IS_ALIGNED_DWORD(src)) alignment = DWORD_BYTES; if(alignment == 0) return FPGA_EXCEPTION; fpgaReadMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base, &cur_mem_page); for(i = 0; i < align_bytes/alignment ; i++) { uint64_t mem_page = src & ~DMA_ADDR_SPAN_EXT_WINDOW_MASK; if(mem_page != cur_mem_page) { cur_mem_page = mem_page; fpgaWriteMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base, cur_mem_page); } offset = dma_h->dma_ase_data_base+(src&DMA_ADDR_SPAN_EXT_WINDOW_MASK); if(alignment == QWORD_BYTES) res = fpgaReadMMIO64(dma_h->fpga_h, 0, offset, (uint64_t *)dst); else if(alignment == DWORD_BYTES) res = fpgaReadMMIO32(dma_h->fpga_h, 0, offset, (uint32_t *)dst); if(res != FPGA_OK) return res; dst += alignment; src += alignment; } align_bytes -= (align_bytes/alignment)*alignment; *src_ptr = src; *dst_ptr = dst; *count = align_bytes; return res; } /** * _ase_fpga_to_host * * @brief Tx "count" bytes from FPGA to HOST using Address span expander(ASE)- will internally make calls to handle unaligned and aligned MMIO writes. 
* @param[in] dma Handle to the FPGA DMA object * @param[in/out] dst_ptr Host Buffer Address * @param[in/out] src_ptr FPGA address * @param[in/out] count Size in bytes * @return fpga_result FPGA_OK on success, return code otherwise * */ static fpga_result _ase_fpga_to_host(fpga_dma_handle dma_h, uint64_t *src_ptr,uint64_t *dst_ptr, uint64_t count) { fpga_result res = FPGA_OK; uint64_t src = *src_ptr; uint64_t dst = *dst_ptr; uint64_t count_left = count; uint64_t mmio_shift = 0; uint64_t unaligned_size = 0; do { //Set the Address Span expander CTRL port to the required 4K window uint64_t cur_mem_page = src & ~DMA_ADDR_SPAN_EXT_WINDOW_MASK; res = fpgaWriteMMIO64(dma_h->fpga_h, 0, dma_h->dma_ase_cntl_base , cur_mem_page); if(res != FPGA_OK) return res; //Can use for debug if src span was set to the right 4K //mmio_read64(dma_h->fpga_h, (dma_h->dma_base)+FPGA_DMA_ADDR_SPAN_EXT_CNTL, &data, "addr_span"); //Aligns address to 8 byte using src masking method if(!IS_ALIGNED_QWORD(src) && !IS_ALIGNED_DWORD(src)) { mmio_shift = src % QWORD_BYTES; unaligned_size = QWORD_BYTES - mmio_shift; if(unaligned_size > count_left) unaligned_size = count_left; res = _read_memory_mmio_unaligned(dma_h, src, dst, unaligned_size); if(res != FPGA_OK) return res; count_left -= unaligned_size; src += unaligned_size; dst += unaligned_size; } if(count_left) { //Handles 8/4 byte MMIO transfer if(IS_ALIGNED_QWORD(src)) { res = _read_memory_mmio(dma_h, &src, &dst, &count_left); if(res != FPGA_OK) return res; } if(IS_ALIGNED_DWORD(src)) { res = _read_memory_mmio(dma_h, &src, &dst, &count_left); if(res != FPGA_OK) return res; }//Left over unaligned count bytes are transfered using src masking method if(count_left) { mmio_shift = src % QWORD_BYTES; unaligned_size = QWORD_BYTES - mmio_shift; if(unaligned_size > count_left) unaligned_size = count_left; res = _read_memory_mmio_unaligned(dma_h, src, dst, unaligned_size); if(res != FPGA_OK) return res; count_left -= unaligned_size; src += unaligned_size; dst += unaligned_size; } } }while(count_left!=0); *src_ptr = src; *dst_ptr = dst; debug_print("src_ptr = %08lx , count_left = %08lx, dst = %08lx \n", *src_ptr, count_left, *dst_ptr); if(count_left != 0) { debug_print("%08lx bytes left to transfer, MMIO needs tx len to be 8/4 byte aligned \n", count_left); return FPGA_NOT_SUPPORTED; } return FPGA_OK; } static fpga_result clear_interrupt(fpga_dma_handle dma_h) { //clear interrupt by writing 1 to IRQ bit in status register msgdma_status_t status = {0}; status.st.irq = 1; msgdma_csr_t *csr = (msgdma_csr_t*)(dma_h->dma_csr_base); return fpgaWriteMMIO32(dma_h->fpga_h, dma_h->mmio_num, (uint64_t)((char*)csr+offsetof(msgdma_csr_t, status)), status.reg); } static fpga_result poll_interrupt(fpga_dma_handle dma_h) { struct pollfd pfd = {0}; fpga_result res = FPGA_OK; res = fpgaGetOSObjectFromEventHandle(dma_h->eh, &pfd.fd); ON_ERR_GOTO(res, out, "fpgaGetOSObjectFromEventHandle failed\n"); pfd.events = POLLIN; int poll_res = poll(&pfd, 1, -1); if(poll_res < 0) { fprintf( stderr, "Poll error errno = %s\n",strerror(errno)); res = FPGA_EXCEPTION; goto out; } else if(poll_res == 0) { fprintf( stderr, "Poll(interrupt) timeout \n"); res = FPGA_EXCEPTION; } else { uint64_t count = 0; read(pfd.fd, &count, sizeof(count)); debug_print("Poll success. 
Return = %d, count = %d\n",poll_res, (int)count); res = FPGA_OK; } out: clear_interrupt(dma_h); return res; } static fpga_result _issue_magic(fpga_dma_handle dma_h) { fpga_result res = FPGA_OK; *(dma_h->magic_buf) = 0x0ULL; msgdma_status_t status = {0}; msgdma_csr_t *csr = (msgdma_csr_t*)(dma_h->dma_csr_base); res = fpgaReadMMIO32(dma_h->fpga_h, dma_h->mmio_num, (uint64_t)((char*)csr+offsetof(msgdma_csr_t, status)), &status.reg); res = _do_dma(dma_h, dma_h->magic_iova | FPGA_DMA_WF_HOST_MASK, FPGA_DMA_WF_ROM_MAGIC_NO_MASK, 64, 1, FPGA_TO_HOST_MM, true/*intr_en*/); return res; } static void _wait_magic(fpga_dma_handle dma_h) { poll_interrupt(dma_h); while (*(dma_h->magic_buf) != FPGA_DMA_WF_MAGIC_NO); *(dma_h->magic_buf) = 0x0ULL; } fpga_result transferHostToFpga(fpga_dma_handle dma_h, uint64_t dst, uint64_t src, size_t count, fpga_dma_transfer_t type) { fpga_result res = FPGA_OK; uint64_t i = 0; uint64_t count_left = count; uint64_t aligned_addr = 0; uint64_t align_bytes = 0; int issued_intr = 0; debug_print("Host To Fpga ----------- src = %08lx, dst = %08lx \n", src, dst); if(!IS_DMA_ALIGNED(dst)) { if(count_left < FPGA_DMA_ALIGN_BYTES) { res = _ase_host_to_fpga(dma_h,&dst,&src,count_left); ON_ERR_GOTO(res, out, "HOST_TO_FPGA_MM Transfer failed\n"); return res; } else { aligned_addr = ((dst/FPGA_DMA_ALIGN_BYTES)+1)*FPGA_DMA_ALIGN_BYTES; align_bytes = aligned_addr - dst; res = _ase_host_to_fpga(dma_h,&dst,&src,align_bytes); ON_ERR_GOTO(res, out, "HOST_TO_FPGA_MM Transfer failed\n"); count_left = count_left - align_bytes; } } if(count_left) { uint32_t dma_chunks = count_left/FPGA_DMA_BUF_SIZE; count_left -= (dma_chunks*FPGA_DMA_BUF_SIZE); debug_print("DMA TX : dma chuncks = %d, count_left = %08lx, dst = %08lx, src = %08lx \n", dma_chunks, count_left, dst, src); for(i=0; i<dma_chunks; i++) { // constant size transfer, no length check required for memcpy memcpy(dma_h->dma_buf_ptr[i%FPGA_DMA_MAX_BUF], (void*)(src+i*FPGA_DMA_BUF_SIZE), FPGA_DMA_BUF_SIZE); if((i%(FPGA_DMA_MAX_BUF/2) == (FPGA_DMA_MAX_BUF/2)-1) || i == (dma_chunks - 1)/*last descriptor*/) { if(i == (FPGA_DMA_MAX_BUF/2)-1) { res = _do_dma(dma_h, (dst+i*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_iova[i%FPGA_DMA_MAX_BUF] | FPGA_DMA_HOST_MASK, FPGA_DMA_BUF_SIZE,0, type, true/*intr_en*/); } else { if(issued_intr) poll_interrupt(dma_h); res = _do_dma(dma_h, (dst+i*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_iova[i%FPGA_DMA_MAX_BUF] | FPGA_DMA_HOST_MASK, FPGA_DMA_BUF_SIZE,0, type, true/*intr_en*/); } issued_intr = 1; } else { res = _do_dma(dma_h, (dst+i*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_iova[i%FPGA_DMA_MAX_BUF] | FPGA_DMA_HOST_MASK, FPGA_DMA_BUF_SIZE,0, type, false/*intr_en*/); } } if(issued_intr) { poll_interrupt(dma_h); issued_intr = 0; } if(count_left) { uint64_t dma_tx_bytes = (count_left/FPGA_DMA_ALIGN_BYTES)*FPGA_DMA_ALIGN_BYTES; if(dma_tx_bytes != 0) { debug_print("dma_tx_bytes = %08lx was transfered using DMA\n", dma_tx_bytes); if(dma_tx_bytes > FPGA_DMA_BUF_SIZE) { res = FPGA_NO_MEMORY; ON_ERR_GOTO(res, out, "Illegal transfer size\n"); } memcpy(dma_h->dma_buf_ptr[0], (void*)(src+dma_chunks*FPGA_DMA_BUF_SIZE), dma_tx_bytes); res = _do_dma(dma_h, (dst+dma_chunks*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_iova[0] | FPGA_DMA_HOST_MASK, dma_tx_bytes,1, type, true/*intr_en*/); ON_ERR_GOTO(res, out, "HOST_TO_FPGA_MM Transfer failed\n"); poll_interrupt(dma_h); } count_left -= dma_tx_bytes; if(count_left) { dst = dst + dma_chunks*FPGA_DMA_BUF_SIZE + dma_tx_bytes; src = src + dma_chunks*FPGA_DMA_BUF_SIZE + dma_tx_bytes; res = 
_ase_host_to_fpga(dma_h,&dst,&src,count_left); ON_ERR_GOTO(res, out, "HOST_TO_FPGA_MM Transfer failed\n"); } } } out: return res; } fpga_result transferFpgaToHost(fpga_dma_handle dma_h, uint64_t dst, uint64_t src, size_t count, fpga_dma_transfer_t type) { fpga_result res = FPGA_OK; uint64_t i = 0; uint64_t j = 0; uint64_t count_left = count; uint64_t aligned_addr = 0; uint64_t align_bytes = 0; int wf_issued = 0; debug_print("FPGA To Host ----------- src = %08lx, dst = %08lx \n", src, dst); if(!IS_DMA_ALIGNED(src)) { if(count_left < FPGA_DMA_ALIGN_BYTES) { res = _ase_fpga_to_host(dma_h,&src,&dst,count_left); ON_ERR_GOTO(res, out, "FPGA_TO_HOST_MM Transfer failed"); return res; } else { aligned_addr = ((src/FPGA_DMA_ALIGN_BYTES)+1)*FPGA_DMA_ALIGN_BYTES; align_bytes = aligned_addr - src; res = _ase_fpga_to_host(dma_h,&src,&dst,align_bytes); ON_ERR_GOTO(res, out, "FPGA_TO_HOST_MM Transfer failed"); count_left = count_left - align_bytes; } } if(count_left) { uint32_t dma_chunks = count_left/FPGA_DMA_BUF_SIZE; count_left -= (dma_chunks*FPGA_DMA_BUF_SIZE); debug_print("DMA TX : dma chunks = %d, count_left = %08lx, dst = %08lx, src = %08lx \n", dma_chunks, count_left, dst, src); assert(FPGA_DMA_MAX_BUF >= 8); uint64_t pending_buf = 0; for(i=0; i<dma_chunks; i++) { res = _do_dma(dma_h, dma_h->dma_buf_iova[i%(FPGA_DMA_MAX_BUF)] | FPGA_DMA_HOST_MASK, (src+i*FPGA_DMA_BUF_SIZE), FPGA_DMA_BUF_SIZE, 1, type, false/*intr_en*/); ON_ERR_GOTO(res, out, "FPGA_TO_HOST_MM Transfer failed"); const int num_pending = i-pending_buf+1; if(num_pending == (FPGA_DMA_MAX_BUF/2)) { //Enters this loop only once,after first batch of descriptors. res = _issue_magic(dma_h); ON_ERR_GOTO(res, out, "Magic number issue failed"); wf_issued = 1; } if(num_pending > (FPGA_DMA_MAX_BUF-1) || i == (dma_chunks - 1)/*last descriptor*/) { if(wf_issued) { _wait_magic(dma_h); for(j=0; j<(FPGA_DMA_MAX_BUF/2); j++) { // constant size transfer; no length check required memcpy((void*)(dst+pending_buf*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_ptr[pending_buf%(FPGA_DMA_MAX_BUF)], FPGA_DMA_BUF_SIZE); pending_buf++; } wf_issued = 0; } res = _issue_magic(dma_h); ON_ERR_GOTO(res, out, "Magic number issue failed"); wf_issued = 1; } } if(wf_issued) _wait_magic(dma_h); //clear out final dma memcpy operations while(pending_buf<dma_chunks) { // constant size transfer; no length check required memcpy((void*)(dst+pending_buf*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_ptr[pending_buf%(FPGA_DMA_MAX_BUF)], FPGA_DMA_BUF_SIZE); pending_buf++; } if(count_left > 0) { uint64_t dma_tx_bytes = (count_left/FPGA_DMA_ALIGN_BYTES)*FPGA_DMA_ALIGN_BYTES; if(dma_tx_bytes != 0) { debug_print("dma_tx_bytes = %08lx was transfered using DMA\n", dma_tx_bytes); res = _do_dma(dma_h, dma_h->dma_buf_iova[0] | FPGA_DMA_HOST_MASK, (src+dma_chunks*FPGA_DMA_BUF_SIZE), dma_tx_bytes, 1, type, false/*intr_en*/); ON_ERR_GOTO(res, out, "FPGA_TO_HOST_MM Transfer failed"); res = _issue_magic(dma_h); ON_ERR_GOTO(res, out, "Magic number issue failed"); _wait_magic(dma_h); if(dma_tx_bytes > FPGA_DMA_BUF_SIZE) { res = FPGA_NO_MEMORY; ON_ERR_GOTO(res, out, "Illegal transfer size\n"); } memcpy((void*)(dst+dma_chunks*FPGA_DMA_BUF_SIZE), dma_h->dma_buf_ptr[0], dma_tx_bytes); } count_left -= dma_tx_bytes; if(count_left) { dst = dst + dma_chunks*FPGA_DMA_BUF_SIZE + dma_tx_bytes; src = src + dma_chunks*FPGA_DMA_BUF_SIZE + dma_tx_bytes; res = _ase_fpga_to_host(dma_h,&src,&dst, count_left); ON_ERR_GOTO(res, out, "FPGA_TO_HOST_MM Transfer failed"); } } } out: return res; } fpga_result transferFpgaToFpga(fpga_dma_handle 
dma_h, uint64_t dst, uint64_t src, size_t count, fpga_dma_transfer_t type) { fpga_result res = FPGA_OK; uint64_t i = 0; uint64_t count_left = count; uint64_t *tmp_buf = NULL; if(IS_DMA_ALIGNED(dst) && IS_DMA_ALIGNED(src) && IS_DMA_ALIGNED(count_left)) { uint32_t dma_chunks = count_left/FPGA_DMA_BUF_SIZE; count_left -= (dma_chunks*FPGA_DMA_BUF_SIZE); debug_print("!!!FPGA to FPGA!!! TX :dma chunks = %d, count = %08lx, dst = %08lx, src = %08lx \n", dma_chunks, count_left, dst, src); for(i=0; i<dma_chunks; i++) { res = _do_dma(dma_h, (dst+i*FPGA_DMA_BUF_SIZE), (src+i*FPGA_DMA_BUF_SIZE), FPGA_DMA_BUF_SIZE, 0, type, false/*intr_en*/); ON_ERR_GOTO(res, out, "FPGA_TO_FPGA_MM Transfer failed"); if( (i+1) % FPGA_DMA_MAX_BUF ==0 || i == (dma_chunks - 1)/*last descriptor*/) { res = _issue_magic(dma_h); ON_ERR_GOTO(res, out, "Magic number issue failed"); _wait_magic(dma_h); } } if(count_left > 0) { debug_print("Count_left = %08lx was transfered using DMA\n", count_left); res = _do_dma(dma_h, (dst+dma_chunks*FPGA_DMA_BUF_SIZE), (src+dma_chunks*FPGA_DMA_BUF_SIZE), count_left, 1, type, false/*intr_en*/); ON_ERR_GOTO(res, out, "FPGA_TO_FPGA_MM Transfer failed"); res = _issue_magic(dma_h); ON_ERR_GOTO(res, out, "Magic number issue failed"); _wait_magic(dma_h); } }else { if((src < dst) && (src+count_left >= dst)) { debug_print("Overlapping addresses, Provide correct dst address\n"); return FPGA_NOT_SUPPORTED; } uint32_t tx_chunks = count_left/FPGA_DMA_BUF_ALIGN_SIZE; count_left -= (tx_chunks*FPGA_DMA_BUF_ALIGN_SIZE); debug_print("!!!FPGA to FPGA TX!!! : tx chunks = %d, count = %08lx, dst = %08lx, src = %08lx \n", tx_chunks, count_left, dst, src); for(i=0; i<tx_chunks; i++) { tmp_buf = (uint64_t*)malloc(FPGA_DMA_BUF_ALIGN_SIZE); res = transferFpgaToHost(dma_h, (uint64_t)tmp_buf, (src+i*FPGA_DMA_BUF_ALIGN_SIZE), FPGA_DMA_BUF_ALIGN_SIZE, FPGA_TO_HOST_MM); ON_ERR_GOTO(res, out_spl, "FPGA_TO_FPGA_MM Transfer failed"); res = transferHostToFpga(dma_h, (dst+i*FPGA_DMA_BUF_ALIGN_SIZE), (uint64_t)tmp_buf, FPGA_DMA_BUF_ALIGN_SIZE, HOST_TO_FPGA_MM); ON_ERR_GOTO(res, out_spl, "FPGA_TO_FPGA_MM Transfer failed"); free(tmp_buf); } if(count_left > 0) { tmp_buf = (uint64_t*)malloc(count_left); res = transferFpgaToHost(dma_h, (uint64_t)tmp_buf, (src+tx_chunks*FPGA_DMA_BUF_ALIGN_SIZE), count_left, FPGA_TO_HOST_MM); ON_ERR_GOTO(res, out_spl, "FPGA_TO_FPGA_MM Transfer failed"); res = transferHostToFpga(dma_h, (dst+tx_chunks*FPGA_DMA_BUF_ALIGN_SIZE), (uint64_t)tmp_buf, count_left, HOST_TO_FPGA_MM); ON_ERR_GOTO(res, out_spl, "FPGA_TO_FPGA_MM Transfer failed"); free(tmp_buf); } } out: return res; out_spl: free(tmp_buf); return res; } fpga_result fpgaDmaTransferSync(fpga_dma_handle dma_h, uint64_t dst, uint64_t src, size_t count, fpga_dma_transfer_t type) { fpga_result res = FPGA_OK; if(!dma_h) return FPGA_INVALID_PARAM; if(type >= FPGA_MAX_TRANSFER_TYPE) return FPGA_INVALID_PARAM; if(!(type == HOST_TO_FPGA_MM || type == FPGA_TO_HOST_MM || type == FPGA_TO_FPGA_MM)) return FPGA_NOT_SUPPORTED; if(!dma_h->fpga_h) return FPGA_INVALID_PARAM; if(type == HOST_TO_FPGA_MM) { res = transferHostToFpga(dma_h, dst, src, count, HOST_TO_FPGA_MM); } else if(type == FPGA_TO_HOST_MM) { res = transferFpgaToHost(dma_h, dst, src, count, FPGA_TO_HOST_MM); } else if(type == FPGA_TO_FPGA_MM) { res = transferFpgaToFpga(dma_h, dst, src, count, FPGA_TO_FPGA_MM); } else { return FPGA_NOT_SUPPORTED; } return res; } fpga_result fpgaDmaTransferAsync(fpga_dma_handle dma, uint64_t dst, uint64_t src, size_t count, fpga_dma_transfer_t type, fpga_dma_transfer_cb cb, 
void *context) { // TODO return FPGA_NOT_SUPPORTED; } fpga_result fpgaDmaClose(fpga_dma_handle dma_h) { fpga_result res = FPGA_OK; int i = 0; if(!dma_h) { res = FPGA_INVALID_PARAM; goto out; } if(!dma_h->fpga_h) { res = FPGA_INVALID_PARAM; goto out; } for(i=0; i<FPGA_DMA_MAX_BUF; i++) { res = fpgaReleaseBuffer(dma_h->fpga_h, dma_h->dma_buf_wsid[i]); ON_ERR_GOTO(res, out, "fpgaReleaseBuffer failed"); } res = fpgaReleaseBuffer(dma_h->fpga_h, dma_h->magic_wsid); ON_ERR_GOTO(res, out, "fpgaReleaseBuffer"); fpgaUnregisterEvent(dma_h->fpga_h, FPGA_EVENT_INTERRUPT, dma_h->eh); fpgaDestroyEventHandle(&dma_h->eh); // turn off global interrupts msgdma_ctrl_t ctrl = {0}; ctrl.ct.global_intr_en_mask = 0; res = fpgaWriteMMIO32(dma_h->fpga_h, 0, dma_h->dma_csr_base+offsetof(msgdma_csr_t, ctrl), ctrl.reg); out: free((void*)dma_h); return res; }
1
15046
Should we provide a little more context in the error message? Maybe throw in something about what operation was being performed?
OPAE-opae-sdk
c
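The review comment on this record asks that the DMA error messages name the operation in progress, not just the failing call. A minimal sketch of that pattern follows, in Python for illustration only: the names `DmaError` and `read_mmio64` are hypothetical stand-ins, not part of the OPAE API, and the hedged idea is simply that each `ON_ERR_GOTO` site could pass an operation string alongside the call name.

```python
# Illustrative only: a generic "wrap errors with operation context" pattern.
# DmaError and read_mmio64 are hypothetical, not part of the OPAE API.

class DmaError(Exception):
    """Carries both the failing call and the higher-level operation."""
    def __init__(self, call, operation, detail=""):
        super().__init__(f"{call} failed during {operation}: {detail}")

def read_mmio64(offset, operation):
    # Stand-in for fpgaReadMMIO64; raises with context instead of a bare name.
    ok, value = False, None  # pretend the hardware read failed
    if not ok:
        raise DmaError("fpgaReadMMIO64", operation, f"offset=0x{offset:x}")
    return value

try:
    read_mmio64(0x100, "walking the DFH feature list")
except DmaError as e:
    print(e)  # fpgaReadMMIO64 failed during walking the DFH feature list: offset=0x100
```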
@@ -138,7 +138,10 @@ class PluginBase extends ServiceProviderBase */ public function registerSettings() { - return []; + $configuration = $this->getConfigurationFromYaml(); + if (array_key_exists('settings', $configuration)) { + return $configuration['settings']; + } } /**
1
<?php namespace System\Classes; use Illuminate\Support\ServiceProvider as ServiceProviderBase; use ReflectionClass; use SystemException; use Yaml; use Backend; /** * Plugin base class * * @package october\system * @author Alexey Bobkov, Samuel Georges */ class PluginBase extends ServiceProviderBase { /** * @var boolean */ protected $loadedYamlConfiguration = false; /** * @var array Plugin dependencies */ public $require = []; /** * @var boolean Determine if this plugin should have elevated privileges. */ public $elevated = false; /** * @var boolean Determine if this plugin should be loaded (false) or not (true). */ public $disabled = false; /** * Returns information about this plugin, including plugin name and developer name. * * @return array * @throws SystemException */ public function pluginDetails() { $thisClass = get_class($this); $configuration = $this->getConfigurationFromYaml(sprintf('Plugin configuration file plugin.yaml is not '. 'found for the plugin class %s. Create the file or override pluginDetails() '. 'method in the plugin class.', $thisClass)); if (!array_key_exists('plugin', $configuration)) { throw new SystemException(sprintf( 'The plugin configuration file plugin.yaml should contain the "plugin" section: %s.', $thisClass) ); } return $configuration['plugin']; } /** * Register method, called when the plugin is first registered. * * @return void */ public function register() { } /** * Boot method, called right before the request route. * * @return array */ public function boot() { } /** * Registers CMS markup tags introduced by this plugin. * * @return array */ public function registerMarkupTags() { return []; } /** * Registers any front-end components implemented in this plugin. * * @return array */ public function registerComponents() { return []; } /** * Registers back-end navigation items for this plugin. * * @return array */ public function registerNavigation() { $configuration = $this->getConfigurationFromYaml(); if (array_key_exists('navigation', $configuration)) { $navigation = $configuration['navigation']; if (is_array($navigation)) { array_walk_recursive($navigation, function(&$item, $key){ if ($key === 'url') { $item = Backend::url($item); } }); } return $navigation; } } /** * Registers any back-end permissions used by this plugin. * * @return array */ public function registerPermissions() { $configuration = $this->getConfigurationFromYaml(); if (array_key_exists('permissions', $configuration)) { return $configuration['permissions']; } } /** * Registers any back-end configuration links used by this plugin. * * @return array */ public function registerSettings() { return []; } /** * Registers scheduled tasks that are executed on a regular basis. * * @param string $schedule * @return void */ public function registerSchedule($schedule) { } /** * Registers any report widgets provided by this plugin. * The widgets must be returned in the following format: * [ * 'className1'=>[ * 'label' => 'My widget 1', * 'context' => ['context-1', 'context-2'], * ], * 'className2' => [ * 'label' => 'My widget 2', * 'context' => 'context-1' * ] * ] * @return array */ public function registerReportWidgets() { return []; } /** * Registers any form widgets implemented in this plugin. * The widgets must be returned in the following format: * ['className1' => 'alias'], * ['className2' => 'anotherAlias'] * @return array */ public function registerFormWidgets() { return []; } /** * Registers custom back-end list column types introduced by this plugin. 
* * @return array */ public function registerListColumnTypes() { return []; } /** * Registers any mail templates implemented by this plugin. * The templates must be returned in the following format: * ['acme.blog::mail.welcome' => 'This is a description of the welcome template'], * ['acme.blog::mail.forgot_password' => 'This is a description of the forgot password template'], * @return array */ public function registerMailTemplates() { return []; } /** * Registers a new console (artisan) command * * @param string $key The command name * @param string $class The command class * @return void */ public function registerConsoleCommand($key, $class) { $key = 'command.'.$key; $this->app[$key] = $this->app->share(function ($app) use ($class) { return new $class; }); $this->commands($key); } /** * Read configuration from YAML file * * @param string|null $exceptionMessage * @return array|bool * @throws SystemException */ protected function getConfigurationFromYaml($exceptionMessage = null) { if ($this->loadedYamlConfiguration !== false) { return $this->loadedYamlConfiguration; } $reflection = new ReflectionClass(get_class($this)); $yamlFilePath = dirname($reflection->getFileName()).'/plugin.yaml'; if (!file_exists($yamlFilePath)) { if ($exceptionMessage) { throw new SystemException($exceptionMessage); } else { $this->loadedYamlConfiguration = []; } } else { $this->loadedYamlConfiguration = Yaml::parse(file_get_contents($yamlFilePath)); if (!is_array($this->loadedYamlConfiguration)) { throw new SystemException(sprintf('Invalid format of the plugin configuration file: %s. The file should define an array.', $yamlFilePath)); } } return $this->loadedYamlConfiguration; } }
1
12031
Project standard is spaces, not tabs. Please fix and then we can merge this.
octobercms-october
php
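The patch in this record replaces a hard-coded empty array in `registerSettings()` with a guarded lookup into the parsed `plugin.yaml`. One detail worth noting from the diff itself: as written, the new PHP code implicitly returns null when the `settings` key is absent, whereas the old code returned `[]`. Below is a minimal Python sketch of the same guarded-lookup pattern that preserves the empty default; it assumes only a parsed YAML mapping and is not October CMS code.

```python
# A minimal sketch of the guarded-lookup pattern from the patch, assuming a
# parsed YAML dict. Returning an empty mapping on a miss keeps the return
# type stable for callers, unlike an implicit null.

def register_settings(configuration: dict) -> dict:
    # Return the 'settings' section if present, else an empty mapping,
    # so callers can always iterate the result.
    return configuration.get("settings", {})

print(register_settings({"settings": {"location": {"label": "Locations"}}}))
print(register_settings({}))  # {} rather than None
```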
@@ -1,12 +1,14 @@ #include "dynamic_zone.h" -#include "expedition.h" -#include "expedition_state.h" +#include "cliententry.h" +#include "clientlist.h" +#include "dynamic_zone_manager.h" #include "worlddb.h" #include "zonelist.h" #include "zoneserver.h" #include "../common/eqemu_logsys.h" #include "../common/repositories/instance_list_repository.h" +extern ClientList client_list; extern ZSList zoneserver_list; Database& DynamicZone::GetDatabase()
1
#include "dynamic_zone.h" #include "expedition.h" #include "expedition_state.h" #include "worlddb.h" #include "zonelist.h" #include "zoneserver.h" #include "../common/eqemu_logsys.h" #include "../common/repositories/instance_list_repository.h" extern ZSList zoneserver_list; Database& DynamicZone::GetDatabase() { return database; } DynamicZone* DynamicZone::FindDynamicZoneByID(uint32_t dz_id) { auto expedition = expedition_state.GetExpeditionByDynamicZoneID(dz_id); if (expedition) { return &expedition->GetDynamicZone(); } // todo: other system caches return nullptr; } DynamicZoneStatus DynamicZone::Process() { DynamicZoneStatus status = DynamicZoneStatus::Normal; // force expire if no members if (!HasMembers() || IsExpired()) { status = DynamicZoneStatus::Expired; auto dz_zoneserver = zoneserver_list.FindByInstanceID(GetInstanceID()); if (!dz_zoneserver || dz_zoneserver->NumPlayers() == 0) // no clients inside dz { status = DynamicZoneStatus::ExpiredEmpty; if (!HasMembers() && !m_is_pending_early_shutdown && RuleB(DynamicZone, EmptyShutdownEnabled)) { SetSecondsRemaining(RuleI(DynamicZone, EmptyShutdownDelaySeconds)); m_is_pending_early_shutdown = true; } } } return status; } void DynamicZone::SetSecondsRemaining(uint32_t seconds_remaining) { auto now = std::chrono::system_clock::now(); auto new_remaining = std::chrono::seconds(seconds_remaining); auto current_remaining = m_expire_time - now; if (current_remaining > new_remaining) // reduce only { LogDynamicZonesDetail("Updating dynamic zone [{}] instance [{}] seconds remaining to [{}]s", GetID(), GetInstanceID(), seconds_remaining); // preserve original start time and adjust duration instead m_expire_time = now + new_remaining; m_duration = std::chrono::duration_cast<std::chrono::seconds>(m_expire_time - m_start_time); InstanceListRepository::UpdateDuration(database, GetInstanceID(), static_cast<uint32_t>(m_duration.count())); SendZonesDurationUpdate(); // update zone caches and actual instance's timer } } void DynamicZone::SendZonesDurationUpdate() { constexpr uint32_t packsize = sizeof(ServerDzSetDuration_Struct); auto pack = std::make_unique<ServerPacket>(ServerOP_DzDurationUpdate, packsize); auto packbuf = reinterpret_cast<ServerDzSetDuration_Struct*>(pack->pBuffer); packbuf->dz_id = GetID(); packbuf->seconds = static_cast<uint32_t>(m_duration.count()); zoneserver_list.SendPacket(pack.get()); } void DynamicZone::HandleZoneMessage(ServerPacket* pack) { switch (pack->opcode) { case ServerOP_DzSetCompass: case ServerOP_DzSetSafeReturn: case ServerOP_DzSetZoneIn: { auto buf = reinterpret_cast<ServerDzLocation_Struct*>(pack->pBuffer); auto dz = DynamicZone::FindDynamicZoneByID(buf->dz_id); if (dz) { if (pack->opcode == ServerOP_DzSetCompass) { dz->SetCompass(buf->zone_id, buf->x, buf->y, buf->z, false); } else if (pack->opcode == ServerOP_DzSetSafeReturn) { dz->SetSafeReturn(buf->zone_id, buf->x, buf->y, buf->z, buf->heading, false); } else if (pack->opcode == ServerOP_DzSetZoneIn) { dz->SetZoneInLocation(buf->x, buf->y, buf->z, buf->heading, false); } } zoneserver_list.SendPacket(pack); break; } case ServerOP_DzAddRemoveCharacter: case ServerOP_DzRemoveAllCharacters: { auto buf = reinterpret_cast<ServerDzCharacter_Struct*>(pack->pBuffer); ZoneServer* instance_zs = zoneserver_list.FindByInstanceID(buf->instance_id); if (instance_zs) { instance_zs->SendPacket(pack); } break; } case ServerOP_DzSetSecondsRemaining: { auto buf = reinterpret_cast<ServerDzSetDuration_Struct*>(pack->pBuffer); auto dz = DynamicZone::FindDynamicZoneByID(buf->dz_id); 
if (dz) { dz->SetSecondsRemaining(buf->seconds); } break; } }; } void DynamicZone::SendInstanceAddRemoveCharacter(uint32_t character_id, bool remove) { ZoneServer* instance_zs = zoneserver_list.FindByInstanceID(GetInstanceID()); if (instance_zs) { auto pack = CreateServerAddRemoveCharacterPacket(character_id, remove); instance_zs->SendPacket(pack.get()); } } void DynamicZone::SendInstanceRemoveAllCharacters() { ZoneServer* instance_zs = zoneserver_list.FindByInstanceID(GetInstanceID()); if (instance_zs) { auto pack = CreateServerRemoveAllCharactersPacket(); instance_zs->SendPacket(pack.get()); } } void DynamicZone::SendGlobalLocationChange(uint16_t server_opcode, const DynamicZoneLocation& location) { auto pack = CreateServerDzLocationPacket(server_opcode, location); zoneserver_list.SendPacket(pack.get()); }
1
10537
@hgtw is there anything we're worried about with regard to using this cache? Do we want to do staged testing around expeditions before we merge? Curious about your thoughts here.
EQEmu-Server
cpp
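The reviewer's question here concerns the in-memory cache that `FindDynamicZoneByID` consults (`expedition_state`), with a `// todo: other system caches` note in the source for future lookup tiers. A minimal Python sketch of that lookup shape, with hypothetical cache names that are not EQEmu's actual structures: check the primary cache, fall through to any later caches, and return a null-equivalent on a miss so callers must handle it, just as the C++ callers check the returned pointer.

```python
# Illustrative sketch of the lookup pattern in FindDynamicZoneByID: consult an
# in-memory cache first, fall back to other caches, return None on a miss.
# The cache names here are hypothetical.

expedition_cache: dict[int, object] = {}           # dz_id -> dynamic zone
other_system_caches: list[dict[int, object]] = []  # the "todo" in the source

def find_dynamic_zone_by_id(dz_id: int):
    if dz_id in expedition_cache:
        return expedition_cache[dz_id]
    for cache in other_system_caches:  # future systems, per the source's todo
        if dz_id in cache:
            return cache[dz_id]
    return None  # caller must handle the miss, as the C++ callers do

expedition_cache[7] = "dz-7"
print(find_dynamic_zone_by_id(7), find_dynamic_zone_by_id(8))  # dz-7 None
```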
@@ -226,11 +226,11 @@ public class ManifestReader extends CloseableGroup implements Filterable<Filtere @Override public Iterator<DataFile> iterator() { - return iterator(alwaysTrue(), fileSchema); + return iterator(fileSchema); } // visible for use by PartialManifest - Iterator<DataFile> iterator(Expression partFilter, Schema fileProjection) { + Iterator<DataFile> iterator(Schema fileProjection) { return Iterables.transform(Iterables.filter( entries(fileProjection), entry -> entry.status() != ManifestEntry.Status.DELETED),
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import java.io.IOException; import java.util.Collection; import java.util.Iterator; import java.util.List; import java.util.Map; import org.apache.iceberg.avro.Avro; import org.apache.iceberg.avro.AvroIterable; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.expressions.Expression; import org.apache.iceberg.io.CloseableGroup; import org.apache.iceberg.io.CloseableIterable; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.types.Types; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import static org.apache.iceberg.expressions.Expressions.alwaysTrue; /** * Reader for manifest files. * <p> * Readers are created using the builder from {@link #read(InputFile, Map)}. */ public class ManifestReader extends CloseableGroup implements Filterable<FilteredManifest> { private static final Logger LOG = LoggerFactory.getLogger(ManifestReader.class); static final ImmutableList<String> ALL_COLUMNS = ImmutableList.of("*"); static final ImmutableList<String> CHANGE_COLUMNS = ImmutableList.of( "file_path", "file_format", "partition", "record_count", "file_size_in_bytes"); static final ImmutableList<String> CHANGE_WITH_STATS_COLUMNS = ImmutableList.<String>builder() .addAll(CHANGE_COLUMNS) .add("value_counts", "null_value_counts", "lower_bounds", "upper_bounds") .build(); /** * Returns a new {@link ManifestReader} for an {@link InputFile}. * <p> * <em>Note:</em> Most callers should use {@link #read(InputFile, Map)} to ensure that the * schema used by filters is the latest table schema. This should be used only when reading a * manifest without filters. * * @param file an InputFile * @return a manifest reader */ public static ManifestReader read(InputFile file) { return new ManifestReader(file, null); } /** * Returns a new {@link ManifestReader} for an {@link InputFile}. 
* * @param file an InputFile * @param specsById a Map from spec ID to partition spec * @return a manifest reader */ public static ManifestReader read(InputFile file, Map<Integer, PartitionSpec> specsById) { return new ManifestReader(file, specsById); } private final InputFile file; private final Map<String, String> metadata; private final PartitionSpec spec; private final Schema fileSchema; // lazily initialized private List<ManifestEntry> cachedAdds = null; private List<ManifestEntry> cachedDeletes = null; private ManifestReader(InputFile file, Map<Integer, PartitionSpec> specsById) { this.file = file; try { try (AvroIterable<ManifestEntry> headerReader = Avro.read(file) .project(ManifestEntry.getSchema(Types.StructType.of()).select("status")) .build()) { this.metadata = headerReader.getMetadata(); } } catch (IOException e) { throw new RuntimeIOException(e); } int specId = TableMetadata.INITIAL_SPEC_ID; String specProperty = metadata.get("partition-spec-id"); if (specProperty != null) { specId = Integer.parseInt(specProperty); } if (specsById != null) { this.spec = specsById.get(specId); } else { Schema schema = SchemaParser.fromJson(metadata.get("schema")); this.spec = PartitionSpecParser.fromJsonFields(schema, specId, metadata.get("partition-spec")); } this.fileSchema = new Schema(DataFile.getType(spec.partitionType()).fields()); } public InputFile file() { return file; } public Schema schema() { return fileSchema; } public PartitionSpec spec() { return spec; } @Override public FilteredManifest select(Collection<String> columns) { return new FilteredManifest(this, alwaysTrue(), alwaysTrue(), fileSchema, columns, true); } @Override public FilteredManifest project(Schema fileProjection) { return new FilteredManifest(this, alwaysTrue(), alwaysTrue(), fileProjection, ALL_COLUMNS, true); } @Override public FilteredManifest filterPartitions(Expression expr) { return new FilteredManifest(this, expr, alwaysTrue(), fileSchema, ALL_COLUMNS, true); } @Override public FilteredManifest filterRows(Expression expr) { return new FilteredManifest(this, alwaysTrue(), expr, fileSchema, ALL_COLUMNS, true); } @Override public FilteredManifest caseSensitive(boolean caseSensitive) { return new FilteredManifest(this, alwaysTrue(), alwaysTrue(), fileSchema, ALL_COLUMNS, caseSensitive); } public List<ManifestEntry> addedFiles() { if (cachedAdds == null) { cacheChanges(); } return cachedAdds; } public List<ManifestEntry> deletedFiles() { if (cachedDeletes == null) { cacheChanges(); } return cachedDeletes; } private void cacheChanges() { List<ManifestEntry> adds = Lists.newArrayList(); List<ManifestEntry> deletes = Lists.newArrayList(); try (CloseableIterable<ManifestEntry> entries = entries(fileSchema.select(CHANGE_COLUMNS))) { for (ManifestEntry entry : entries) { switch (entry.status()) { case ADDED: adds.add(entry.copyWithoutStats()); break; case DELETED: deletes.add(entry.copyWithoutStats()); break; default: } } } catch (IOException e) { throw new RuntimeIOException(e, "Failed to close manifest entries"); } this.cachedAdds = adds; this.cachedDeletes = deletes; } CloseableIterable<ManifestEntry> entries() { return entries(fileSchema); } CloseableIterable<ManifestEntry> entries(Schema fileProjection) { FileFormat format = FileFormat.fromFileName(file.location()); Preconditions.checkArgument(format != null, "Unable to determine format of manifest: %s", file); switch (format) { case AVRO: AvroIterable<ManifestEntry> reader = Avro.read(file) .project(ManifestEntry.wrapFileSchema(fileProjection.asStruct())) 
.rename("manifest_entry", ManifestEntry.class.getName()) .rename("partition", PartitionData.class.getName()) .rename("r102", PartitionData.class.getName()) .rename("data_file", GenericDataFile.class.getName()) .rename("r2", GenericDataFile.class.getName()) .reuseContainers() .build(); addCloseable(reader); return reader; default: throw new UnsupportedOperationException("Invalid format for manifest file: " + format); } } @Override public Iterator<DataFile> iterator() { return iterator(alwaysTrue(), fileSchema); } // visible for use by PartialManifest Iterator<DataFile> iterator(Expression partFilter, Schema fileProjection) { return Iterables.transform(Iterables.filter( entries(fileProjection), entry -> entry.status() != ManifestEntry.Status.DELETED), ManifestEntry::file).iterator(); } }
1
17506
The purpose of this was to allow pushing filters into the manifest files if we ever use a format other than Parquet. I think overall this is a good change because we can add it back later. Just wanted to note why the unused argument was there.
apache-iceberg
java
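The review comment explains why the now-removed `partFilter` argument existed: to allow pushing a partition predicate into the manifest read path for formats other than Parquet. Here is a small, hedged Python sketch of that pushdown idea, filtering while streaming entries rather than after materializing them; the `Entry` shape is hypothetical and not Iceberg's actual model.

```python
# Sketch of the idea behind the removed partFilter argument: apply a predicate
# while streaming manifest entries ("pushdown") instead of materializing
# everything first. Entry is a hypothetical stand-in.

from dataclasses import dataclass
from typing import Callable, Iterable, Iterator

@dataclass
class Entry:
    status: str      # "ADDED", "EXISTING", or "DELETED"
    partition: dict  # e.g. {"date": "2024-01-01"}

def live_files(entries: Iterable[Entry],
               part_filter: Callable[[dict], bool] = lambda _: True) -> Iterator[Entry]:
    # Skip deleted entries and anything the partition predicate rejects,
    # one entry at a time -- no intermediate list is built.
    return (e for e in entries
            if e.status != "DELETED" and part_filter(e.partition))

entries = [Entry("ADDED", {"date": "2024-01-01"}),
           Entry("DELETED", {"date": "2024-01-01"}),
           Entry("ADDED", {"date": "2024-01-02"})]
print([e.partition for e in live_files(entries, lambda p: p["date"] == "2024-01-02")])
```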
@@ -24,7 +24,15 @@ module Mongoid # @since 1.0.0 def evolve(object) __evolve__(object) do |obj| - obj ? obj.to_s : obj + if obj + if obj.is_a?(::BigDecimal) && Mongoid.map_big_decimal_to_decimal128 + BSON::Decimal128.new(obj) + elsif obj.is_a?(BSON::Decimal128) && Mongoid.map_big_decimal_to_decimal128 + obj + else + obj.to_s + end + end end end end
1
# encoding: utf-8 require "bigdecimal" module Mongoid class Criteria module Queryable module Extensions # The big decimal module adds custom behaviour for Origin onto the # BigDecimal class. module BigDecimal module ClassMethods # Evolves the big decimal into a MongoDB friendly value - in this case # a string. # # @example Evolve the big decimal # BigDecimal.evolve(decimal) # # @param [ BigDecimal ] object The object to convert. # # @return [ String ] The big decimal as a string. # # @since 1.0.0 def evolve(object) __evolve__(object) do |obj| obj ? obj.to_s : obj end end end end end end end end ::BigDecimal.__send__(:extend, Mongoid::Criteria::Queryable::Extensions::BigDecimal::ClassMethods)
1
11,623
I think BSON::Decimal128 should always be serialized as Decimal128, regardless of the config option.
mongodb-mongoid
rb
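The reviewer's point is that a value already in the target wire type (`BSON::Decimal128`) should pass through unconditionally, with the config flag only deciding how *other* types convert. A minimal Python sketch of that ordering, using hypothetical toy types rather than the real BSON classes:

```python
# Sketch of the reviewer's point, with hypothetical names: a value already in
# the target type passes through unconditionally; the config flag only
# controls conversion of other types.

from decimal import Decimal

MAP_BIG_DECIMAL_TO_DECIMAL128 = True  # stand-in for the Mongoid config option

class Decimal128:
    """Toy stand-in for BSON::Decimal128."""
    def __init__(self, value): self.value = Decimal(value)
    def __repr__(self): return f"Decimal128({self.value})"

def evolve(obj):
    if obj is None:
        return obj
    if isinstance(obj, Decimal128):
        return obj                      # always pass through, flag or not
    if isinstance(obj, Decimal) and MAP_BIG_DECIMAL_TO_DECIMAL128:
        return Decimal128(obj)          # conversion is what the flag controls
    return str(obj)

print(evolve(Decimal128("1.5")), evolve(Decimal("1.5")), evolve(None))
```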
@@ -1,6 +1,7 @@ # frozen_string_literal: true require 'bolt/application' +require 'bolt/plan_creator' require 'bolt_spec/files'
1
# frozen_string_literal: true require 'bolt/application' require 'bolt_spec/files' describe Bolt::Application do include BoltSpec::Files let(:analytics) { double('analytics').as_null_object } let(:config) { double('config').as_null_object } let(:executor) { double('executor').as_null_object } let(:inventory) { double('inventory', get_targets: targets).as_null_object } let(:pal) { double('pal').as_null_object } let(:pdb_client) { double('pdb_client').as_null_object } let(:plugins) { double('plugins', puppetdb_client: pdb_client).as_null_object } let(:application) do described_class.new( analytics: analytics, config: config, executor: executor, inventory: inventory, pal: pal, plugins: plugins ) end let(:target) { double('target').as_null_object } let(:targets) { ['localhost'] } before(:each) do allow(application).to receive(:with_benchmark) { |&block| block.call } end describe '#apply' do let(:ast) { double('ast', body: body) } let(:body) { double('body').as_null_object } let(:code) { "notice('hello')" } let(:manifest) { '/path/to/manifest.pp' } before(:each) do allow(File).to receive(:read).with(manifest).and_return(code) end it 'errors if the manifest file does not exist' do stub_nonexistent_file(manifest) expect { application.apply(manifest, targets) }.to raise_error( Bolt::FileError, /The manifest '#{manifest}' does not exist/ ) end it 'errors if the manifest file is unreadable' do stub_unreadable_file(manifest) expect { application.apply(manifest, targets) }.to raise_error( Bolt::FileError, /The manifest '#{manifest}' is unreadable/ ) end it 'errors if the manifest file is not a file' do stub_directory(manifest) expect { application.apply(manifest, targets) }.to raise_error( Bolt::FileError, /The manifest '#{manifest}' is not a file/ ) end it 'warns if the manifest only contains definitions' do allow(body).to receive(:is_a?).with(Puppet::Pops::Model::HostClassDefinition).and_return(true) allow(pal).to receive(:parse_manifest).and_return(ast) application.apply(nil, targets, code: "notice('test')") expect(@log_output.readlines).to include( /WARN .* Manifest only contains definitions/ ) end end describe '#file_download' do let(:destination) { File.expand_path('/path/to/destination') } let(:source) { File.expand_path('/path/to/source') } it 'downloads a file' do expect(executor).to receive(:download_file).with(targets, source, destination) application.download_file(source, destination, targets) end end describe '#file_upload' do let(:destination) { File.expand_path('/path/to/destination') } let(:source) { File.expand_path('/path/to/source') } it 'uploads a file' do stub_file(source) expect(executor).to receive(:upload_file).with(targets, source, destination) application.upload_file(source, destination, targets) end it 'uploads a directory' do stub_directory(source) allow(Dir).to receive(:foreach).and_return([]) expect(executor).to receive(:upload_file).with(targets, source, destination) application.upload_file(source, destination, targets) end it 'errors if the source does not exist' do stub_nonexistent_file(source) expect { application.upload_file(source, destination, targets) }.to raise_error( Bolt::FileError, /The source file '#{source}' does not exist/ ) end it 'errors if the source is unreadable' do stub_unreadable_file(source) expect { application.upload_file(source, destination, targets) }.to raise_error( Bolt::FileError, /The source file '#{source}' is unreadable/ ) end it 'errors if a file in a subdirectory is unreadable' do child = File.join(source, 'child') stub_directory(source) 
stub_unreadable_file(child) allow(Dir).to receive(:foreach).with(source).and_yield('child') expect { application.upload_file(source, destination, targets) }.to raise_error( Bolt::FileError, /The source file '#{child}' is unreadable/ ) end end describe '#inventory_show' do it 'shows specified targets' do expect(inventory).to receive(:get_targets).with(targets).and_return([target]) application.show_inventory(targets) end it 'defaults to showing all targets' do expect(inventory).to receive(:get_targets).with(['all']).and_return([target]) application.show_inventory(nil) end end describe '#plan_run' do let(:plan) { 'plan' } let(:plan_info) { { 'parameters' => plan_params } } let(:plan_params) { {} } before(:each) do allow(pal).to receive(:get_plan_info).and_return(plan_info) end it 'runs a given plan' do expect(pal).to receive(:run_plan) do |plan,| expect(plan).to eq(plan) end application.run_plan(plan, targets) end context 'with TargetSpec $nodes parameter' do let(:plan_params) do { 'nodes' => { 'type' => 'TargetSpec' } } end it 'uses targets for the $nodes parameter' do expect(pal).to receive(:run_plan) do |_plan, params,| expect(params).to include('nodes' => targets.join(',')) end application.run_plan(plan, targets) end it 'does not pass empty targets to the $nodes parameter' do expect(pal).to receive(:run_plan) do |_plan, params,| expect(params).to eq({}) end application.run_plan(plan, []) end end context 'with TargetSpec $targets parameter' do let(:plan_params) do { 'targets' => { 'type' => 'TargetSpec' } } end it 'uses targets for the $targets parameter' do expect(pal).to receive(:run_plan) do |_plan, params,| expect(params).to include('targets' => targets.join(',')) end application.run_plan(plan, targets) end it 'does not pass empty targets to the $nodes parameter' do expect(pal).to receive(:run_plan) do |_plan, params,| expect(params).to eq({}) end application.run_plan(plan, []) end end context 'with TargetSpec $nodes and TargetSpec $targets parameters' do let(:plan_params) do { 'nodes' => { 'type' => 'TargetSpec' }, 'targets' => { 'type' => 'TargetSpec' } } end it 'does not use targets for either parameter' do expect(pal).to receive(:run_plan) do |_plan, params,| expect(params).not_to include('nodes', 'targets') end application.run_plan(plan, targets) expect(@log_output.readlines).to include( /WARN .* Plan parameters include both 'nodes' and 'targets'/ ) end end it 'errors if targets are specified twice' do params = { 'targets' => targets } expect { application.run_plan(plan, targets, params: params) }.to raise_error( Bolt::CLIError, /A plan's 'targets' parameter can be specified using the --targets option/ ) end end describe '#script_run' do let(:script) { '/path/to/script.sh' } it 'runs a script' do stub_file(script) expect(executor).to receive(:run_script).with(targets, script, anything, anything) application.run_script(script, targets) end it 'errors if the script does not exist' do stub_nonexistent_file(script) expect { application.run_script(script, targets) }.to raise_error( Bolt::FileError, /The script '#{script}' does not exist/ ) end it 'errors if the script is unreadable' do stub_unreadable_file(script) expect { application.run_script(script, targets) }.to raise_error( Bolt::FileError, /The script '#{script}' is unreadable/ ) end it 'errors if the script is not a file' do stub_directory(script) expect { application.run_script(script, targets) }.to raise_error( Bolt::FileError, /The script '#{script}' is not a file/ ) end end describe '#task_run' do let(:task) { 'task' } it 
'runs a given task' do expect(pal).to receive(:run_task) do |task,| expect(task).to eq(task) end application.run_task(task, targets) end end end
1
19,014
We should probably just move the `require 'bolt/plan_creator'` in `Bolt::CLI` to `Bolt::Application` so it will already be loaded.
puppetlabs-bolt
rb
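The suggestion here is a dependency-ownership point: `Bolt::Application` should require `bolt/plan_creator` itself so no consumer (like this spec file) has to load it first. A runnable Python sketch of the same idea follows, building two throwaway modules so the demo is self-contained; all file and module names are hypothetical.

```python
# Runnable sketch of the reviewer's point: when application.py imports
# plan_creator itself, consumers only import application. All module names
# here are hypothetical throwaways.

import os, sys, tempfile

pkg = tempfile.mkdtemp()
with open(os.path.join(pkg, "plan_creator.py"), "w") as f:
    f.write("class PlanCreator:\n    def __init__(self, name): self.name = name\n")
with open(os.path.join(pkg, "application.py"), "w") as f:
    # The dependency is loaded where it is used, not by every caller.
    f.write("from plan_creator import PlanCreator\n"
            "def new_plan(name): return PlanCreator(name)\n")

sys.path.insert(0, pkg)
import application  # no explicit 'import plan_creator' needed by the caller

print(application.new_plan("deploy").name)  # deploy
```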
@@ -34,7 +34,10 @@ namespace Benchmarks private static IConfig GetConfig(Options options) { - var baseJob = Job.ShortRun; // let's use the Short Run for better first user experience ;) + var baseJob = Job.Default + .WithWarmupCount(1) // 1 warmup is enough for our purpose + .WithMaxTargetIterationCount(20); // we don't want to run more that 20 iterations + var jobs = GetJobs(options, baseJob).ToArray(); var config = DefaultConfig.Instance
1
using System.Collections.Generic; using System.Linq; using BenchmarkDotNet.Attributes; using BenchmarkDotNet.Columns; using BenchmarkDotNet.Configs; using BenchmarkDotNet.Diagnosers; using BenchmarkDotNet.Environments; using BenchmarkDotNet.Exporters; using BenchmarkDotNet.Exporters.Csv; using BenchmarkDotNet.Horology; using BenchmarkDotNet.Jobs; using BenchmarkDotNet.Running; using BenchmarkDotNet.Toolchains.CoreRt; using BenchmarkDotNet.Toolchains.CsProj; using BenchmarkDotNet.Toolchains.CustomCoreClr; using BenchmarkDotNet.Toolchains.DotNetCli; using BenchmarkDotNet.Toolchains.InProcess; using Benchmarks.Serializers; using CommandLine; namespace Benchmarks { class Program { static void Main(string[] args) => Parser.Default.ParseArguments<Options>(args) .WithParsed(RunBenchmarks) .WithNotParsed(errors => { }); // ignore the errors, the parser prints nice error message private static void RunBenchmarks(Options options) => BenchmarkSwitcher .FromAssemblyAndTypes(typeof(Program).Assembly, SerializerBenchmarks.GetTypes()) .Run(config: GetConfig(options)); private static IConfig GetConfig(Options options) { var baseJob = Job.ShortRun; // let's use the Short Run for better first user experience ;) var jobs = GetJobs(options, baseJob).ToArray(); var config = DefaultConfig.Instance .With(jobs.Any() ? jobs : new[] { baseJob }); if (options.UseMemoryDiagnoser) config = config.With(MemoryDiagnoser.Default); if (options.UseDisassemblyDiagnoser) config = config.With(DisassemblyDiagnoser.Create(DisassemblyDiagnoserConfig.Asm)); if (options.DisplayAllStatistics) config = config.With(StatisticColumn.AllStatistics); return config; } private static IEnumerable<Job> GetJobs(Options options, Job baseJob) { if (options.RunInProcess) yield return baseJob.With(InProcessToolchain.Instance); if (options.RunClr) yield return baseJob.With(Runtime.Clr); if (!string.IsNullOrEmpty(options.ClrVersion)) yield return baseJob.With(new ClrRuntime(options.ClrVersion)); if (options.RunMono) yield return baseJob.With(Runtime.Mono); if (!string.IsNullOrEmpty(options.MonoPath)) yield return baseJob.With(new MonoRuntime("Mono", options.MonoPath)); if (options.RunCoreRt) yield return baseJob.With(Runtime.CoreRT).With(CoreRtToolchain.LatestMyGetBuild); if (!string.IsNullOrEmpty(options.CoreRtVersion)) yield return baseJob.With(Runtime.CoreRT) .With(CoreRtToolchain.CreateBuilder() .UseCoreRtNuGet(options.CoreRtVersion) .AdditionalNuGetFeed("benchmarkdotnet ci", "https://ci.appveyor.com/nuget/benchmarkdotnet") .ToToolchain()); if (!string.IsNullOrEmpty(options.CoreRtPath)) yield return baseJob.With(Runtime.CoreRT) .With(CoreRtToolchain.CreateBuilder() .UseCoreRtLocal(options.CoreRtPath) .AdditionalNuGetFeed("benchmarkdotnet ci", "https://ci.appveyor.com/nuget/benchmarkdotnet") .ToToolchain()); if (options.RunCore) yield return baseJob.With(Runtime.Core).With(CsProjCoreToolchain.Current.Value); if (options.RunCore20) yield return baseJob.With(Runtime.Core).With(CsProjCoreToolchain.NetCoreApp20); if (options.RunCore21) yield return baseJob.With(Runtime.Core).With(CsProjCoreToolchain.NetCoreApp21); if (!string.IsNullOrEmpty(options.CoreFxVersion) || !string.IsNullOrEmpty(options.CoreClrVersion)) { var builder = CustomCoreClrToolchain.CreateBuilder(); if (!string.IsNullOrEmpty(options.CoreFxVersion) && !string.IsNullOrEmpty(options.CoreFxBinPackagesPath)) builder.UseCoreFxLocalBuild(options.CoreFxVersion, options.CoreFxBinPackagesPath); else if (!string.IsNullOrEmpty(options.CoreFxVersion)) 
builder.UseCoreFxNuGet(options.CoreFxVersion); else builder.UseCoreFxDefault(); if (!string.IsNullOrEmpty(options.CoreClrVersion) && !string.IsNullOrEmpty(options.CoreClrBinPackagesPath) && !string.IsNullOrEmpty(options.CoreClrPackagesPath)) builder.UseCoreClrLocalBuild(options.CoreClrVersion, options.CoreClrBinPackagesPath, options.CoreClrPackagesPath); else if (!string.IsNullOrEmpty(options.CoreClrVersion)) builder.UseCoreClrNuGet(options.CoreClrVersion); else builder.UseCoreClrDefault(); if (!string.IsNullOrEmpty(options.CliPath)) builder.DotNetCli(options.CliPath); builder.AdditionalNuGetFeed("benchmarkdotnet ci", "https://ci.appveyor.com/nuget/benchmarkdotnet"); yield return baseJob.With(Runtime.Core).With(builder.ToToolchain()); } } } public class Options { [Option("memory", Required = false, Default = true, HelpText = "Prints memory statistics. Enabled by default")] public bool UseMemoryDiagnoser { get; set; } [Option("disassm", Required = false, Default = false, HelpText = "Gets diassembly for benchmarked code")] public bool UseDisassemblyDiagnoser { get; set; } [Option("allStats", Required = false, Default = false, HelpText = "Displays all statistics (min, max & more")] public bool DisplayAllStatistics { get; set; } [Option("inProcess", Required = false, Default = false, HelpText = "Run benchmarks in Process")] public bool RunInProcess { get; set; } [Option("clr", Required = false, Default = false, HelpText = "Run benchmarks for Clr")] public bool RunClr { get; set; } [Option("clrVersion", Required = false, HelpText = "Optional version of private CLR build used as the value of COMPLUS_Version env var.")] public string ClrVersion { get; set; } [Option("mono", Required = false, Default = false, HelpText = "Run benchmarks for Mono (takes the default from PATH)")] public bool RunMono { get; set; } [Option("monoPath", Required = false, HelpText = "Optional path to Mono which should be used for running benchmarks.")] public string MonoPath { get; set; } [Option("coreRt", Required = false, Default = false, HelpText = "Run benchmarks for the latest CoreRT")] public bool RunCoreRt { get; set; } [Option("coreRtVersion", Required = false, HelpText = "Optional version of Microsoft.DotNet.ILCompiler which should be used to run with CoreRT. Example: \"1.0.0-alpha-26414-01\"")] public string CoreRtVersion { get; set; } [Option("ilcPath", Required = false, HelpText = "Optional IlcPath which should be used to run with private CoreRT build. Example: \"1.0.0-alpha-26414-01\"")] public string CoreRtPath { get; set; } [Option("core", Required = false, Default = false, HelpText = "Run benchmarks for .NET Core")] public bool RunCore { get; set; } [Option("core20", Required = false, Default = false, HelpText = "Run benchmarks for .NET Core 2.0")] public bool RunCore20 { get; set; } [Option("core21", Required = false, Default = false, HelpText = "Run benchmarks for .NET Core 2.1")] public bool RunCore21 { get; set; } [Option("cli", Required = false, HelpText = "Optional path to dotnet cli which should be used for running benchmarks.")] public string CliPath { get; set; } [Option("coreClrVersion", Required = false, HelpText = "Optional version of Microsoft.NETCore.Runtime which should be used. Example: \"2.1.0-preview2-26305-0\"")] public string CoreClrVersion { get; set; } [Option("coreClrBin", Required = false, HelpText = @"Optional path to folder with CoreClr NuGet packages. 
Example: ""C:\coreclr\bin\Product\Windows_NT.x64.Release\.nuget\pkg""")] public string CoreClrBinPackagesPath { get; set; } [Option("coreClrPackages", Required = false, HelpText = @"Optional path to folder with NuGet packages restored for CoreClr build. Example: ""C:\Projects\coreclr\packages""")] public string CoreClrPackagesPath { get; set; } [Option("coreFxVersion", Required = false, HelpText = "Optional version of Microsoft.Private.CoreFx.NETCoreApp which should be used. Example: \"4.5.0-preview2-26307-0\"")] public string CoreFxVersion { get; set; } [Option("coreFxBin", Required = false, HelpText = @"Optional path to folder with CoreFX NuGet packages, Example: ""C:\Projects\forks\corefx\bin\packages\Release""")] public string CoreFxBinPackagesPath { get; set; } } /// <summary> /// this config allows you to run benchmarks for multiple runtimes /// </summary> public class MultipleRuntimesConfig : ManualConfig { public MultipleRuntimesConfig() { Add(Job.Default.With(Runtime.Core).With(CsProjCoreToolchain.From(NetCoreAppSettings.NetCoreApp20)).AsBaseline().WithId("Core 2.0")); Add(Job.Default.With(Runtime.Core).With(CsProjCoreToolchain.From(NetCoreAppSettings.NetCoreApp21)).WithId("Core 2.1")); Add(Job.Default.With(Runtime.Clr).WithId("Clr")); Add(Job.Default.With(Runtime.Mono).WithId("Mono")); // you can comment this if you don't have Mono installed Add(Job.Default.With(Runtime.CoreRT).WithId("CoreRT")); Add(MemoryDiagnoser.Default); Add(DefaultConfig.Instance.GetValidators().ToArray()); Add(DefaultConfig.Instance.GetLoggers().ToArray()); Add(DefaultConfig.Instance.GetColumnProviders().ToArray()); Add(new CsvMeasurementsExporter(CsvSeparator.Semicolon)); //Add(RPlotExporter.Default); // it produces nice plots but requires R to be installed Add(MarkdownExporter.GitHub); Add(HtmlExporter.Default); //Add(StatisticColumn.AllStatistics); Set(new BenchmarkDotNet.Reports.SummaryStyle { PrintUnitsInHeader = true, PrintUnitsInContent = false, TimeUnit = TimeUnit.Microsecond, SizeUnit = SizeUnit.B }); } } }
1
7,149
>20: Can we override this at runtime? Maybe it should be a command line option with a default.
dotnet-performance
.cs
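A minimal sketch of what the reviewer is asking for, in the CommandLine attribute style this Options class already uses. The option name `launchCount`, the property, and the default of 20 are hypothetical stand-ins for the hard-coded `>20` threshold being questioned:

    // Hypothetical option sketching the review suggestion: expose the
    // hard-coded threshold as a command line option with a default.
    [Option("launchCount", Required = false, Default = 20, HelpText = "Maximum number of benchmark launches. Defaults to 20.")]
    public int LaunchCount { get; set; }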
@@ -47,7 +47,7 @@ class Service(object): self.start_error_message = start_error_message self.log_file = log_file self.env = env or os.environ - + @property def service_url(self): """
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import errno import os import platform import subprocess from subprocess import PIPE import time from selenium.common.exceptions import WebDriverException from selenium.webdriver.common import utils try: from subprocess import DEVNULL _HAS_NATIVE_DEVNULL = True except ImportError: DEVNULL = -3 _HAS_NATIVE_DEVNULL = False class Service(object): def __init__(self, executable, port=0, log_file=DEVNULL, env=None, start_error_message=""): self.path = executable self.port = port if self.port == 0: self.port = utils.free_port() if not _HAS_NATIVE_DEVNULL and log_file == DEVNULL: log_file = open(os.devnull, 'wb') self.start_error_message = start_error_message self.log_file = log_file self.env = env or os.environ @property def service_url(self): """ Gets the URL of the Service """ return "http://%s" % utils.join_host_port('localhost', self.port) def command_line_args(self): raise NotImplementedError("This method needs to be implemented in a subclass") def start(self): """ Starts the Service. :Exceptions: - WebDriverException : Raised either when it can't start the service or when it can't connect to the service """ try: cmd = [self.path] cmd.extend(self.command_line_args()) self.process = subprocess.Popen(cmd, env=self.env, close_fds=platform.system() != 'Windows', stdout=self.log_file, stderr=self.log_file, stdin=PIPE) except TypeError: raise except OSError as err: if err.errno == errno.ENOENT: raise WebDriverException( "'%s' executable needs to be in PATH. %s" % ( os.path.basename(self.path), self.start_error_message) ) elif err.errno == errno.EACCES: raise WebDriverException( "'%s' executable may have wrong permissions. %s" % ( os.path.basename(self.path), self.start_error_message) ) else: raise except Exception as e: raise WebDriverException( "The executable %s needs to be available in the path. %s\n%s" % (os.path.basename(self.path), self.start_error_message, str(e))) count = 0 while True: self.assert_process_still_running() if self.is_connectable(): break count += 1 time.sleep(1) if count == 30: raise WebDriverException("Cannot connect to the Service %s" % self.path) def assert_process_still_running(self): return_code = self.process.poll() if return_code is not None: raise WebDriverException( 'Service %s unexpectedly exited. 
Status code was: %s' % (self.path, return_code) ) def is_connectable(self): return utils.is_connectable(self.port) def send_remote_shutdown_command(self): try: from urllib import request as url_request URLError = url_request.URLError except ImportError: import urllib2 as url_request import urllib2 URLError = urllib2.URLError try: url_request.urlopen("%s/shutdown" % self.service_url) except URLError: return for x in range(30): if not self.is_connectable(): break else: time.sleep(1) def stop(self): """ Stops the service. """ if self.log_file != PIPE and not (self.log_file == DEVNULL and _HAS_NATIVE_DEVNULL): try: self.log_file.close() except Exception: pass if self.process is None: return try: self.send_remote_shutdown_command() except TypeError: pass try: if self.process: for stream in [self.process.stdin, self.process.stdout, self.process.stderr]: try: stream.close() except AttributeError: pass self.process.terminate() self.process.wait() self.process.kill() self.process = None except OSError: pass def __del__(self): # `subprocess.Popen` doesn't send signal on `__del__`; # so we attempt to close the launched process when `__del__` # is triggered. try: self.stop() except Exception: pass
1
15,260
Can we just remove it?
SeleniumHQ-selenium
py
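For context on the record above: `Service` is a base class, and concrete driver services override `command_line_args`. A minimal hypothetical subclass, shown only to illustrate that contract (the class name and flag are made up):

    class ExampleDriverService(Service):
        # Hypothetical subclass illustrating the Service contract:
        # subclasses supply the arguments used to launch the driver process.
        def command_line_args(self):
            # Service.__init__ picked a free port when port=0 was passed.
            return ["--port=%d" % self.port]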
@@ -654,7 +654,17 @@ func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) { return repo, UpdateRepository(repo, false) } - if err = createUpdateHook(repoPath); err != nil { + repo, err = FinishMigrateRepository(repo, repoPath) + if err != nil { + return repo, err + } + + return repo, UpdateRepository(repo, false) +} + +// Finish migrating repository with things that don't need to be done for mirrors. +func FinishMigrateRepository(repo *Repository, repoPath string) (*Repository, error) { + if err := createUpdateHook(repoPath); err != nil { return repo, fmt.Errorf("createUpdateHook: %v", err) }
1
// Copyright 2014 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package models import ( "bytes" "errors" "fmt" "html/template" "io/ioutil" "os" "os/exec" "path" "path/filepath" "regexp" "sort" "strings" "sync" "time" "unicode/utf8" "github.com/Unknwon/cae/zip" "github.com/Unknwon/com" "github.com/go-xorm/xorm" "github.com/mcuadros/go-version" "gopkg.in/ini.v1" "github.com/gogits/git-module" api "github.com/gogits/go-gogs-client" "github.com/gogits/gogs/modules/base" "github.com/gogits/gogs/modules/bindata" "github.com/gogits/gogs/modules/log" "github.com/gogits/gogs/modules/process" "github.com/gogits/gogs/modules/setting" ) const ( _TPL_UPDATE_HOOK = "#!/usr/bin/env %s\n%s update $1 $2 $3 --config='%s'\n" ) var ( ErrRepoFileNotExist = errors.New("Repository file does not exist") ErrRepoFileNotLoaded = errors.New("Repository file not loaded") ErrMirrorNotExist = errors.New("Mirror does not exist") ErrInvalidReference = errors.New("Invalid reference specified") ErrNameEmpty = errors.New("Name is empty") ) var ( Gitignores, Licenses, Readmes []string // Maximum items per page in forks, watchers and stars of a repo ItemsPerPage = 40 ) func LoadRepoConfig() { // Load .gitignore and license files and readme templates. types := []string{"gitignore", "license", "readme"} typeFiles := make([][]string, 3) for i, t := range types { files, err := bindata.AssetDir("conf/" + t) if err != nil { log.Fatal(4, "Fail to get %s files: %v", t, err) } customPath := path.Join(setting.CustomPath, "conf", t) if com.IsDir(customPath) { customFiles, err := com.StatDir(customPath) if err != nil { log.Fatal(4, "Fail to get custom %s files: %v", t, err) } for _, f := range customFiles { if !com.IsSliceContainsStr(files, f) { files = append(files, f) } } } typeFiles[i] = files } Gitignores = typeFiles[0] Licenses = typeFiles[1] Readmes = typeFiles[2] sort.Strings(Gitignores) sort.Strings(Licenses) sort.Strings(Readmes) } func NewRepoContext() { zip.Verbose = false // Check Git installation. if _, err := exec.LookPath("git"); err != nil { log.Fatal(4, "Fail to test 'git' command: %v (forgotten install?)", err) } // Check Git version. gitVer, err := git.BinVersion() if err != nil { log.Fatal(4, "Fail to get Git version: %v", err) } log.Info("Git Version: %s", gitVer) if version.Compare("1.7.1", gitVer, ">") { log.Fatal(4, "Gogs requires Git version greater or equal to 1.7.1") } // Git requires setting user.name and user.email in order to commit changes. for configKey, defaultValue := range map[string]string{"user.name": "Gogs", "user.email": "[email protected]"} { if stdout, stderr, err := process.Exec("NewRepoContext(get setting)", "git", "config", "--get", configKey); err != nil || strings.TrimSpace(stdout) == "" { // ExitError indicates this config is not set if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" { if _, stderr, gerr := process.Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil { log.Fatal(4, "Fail to set git %s(%s): %s", configKey, gerr, stderr) } log.Info("Git config %s set to %s", configKey, defaultValue) } else { log.Fatal(4, "Fail to get git %s(%s): %s", configKey, err, stderr) } } } // Set git some configurations. 
if _, stderr, err := process.Exec("NewRepoContext(git config --global core.quotepath false)", "git", "config", "--global", "core.quotepath", "false"); err != nil { log.Fatal(4, "Fail to execute 'git config --global core.quotepath false': %s", stderr) } // Clean up temporary data. os.RemoveAll(filepath.Join(setting.AppDataPath, "tmp")) } // Repository represents a git repository. type Repository struct { ID int64 `xorm:"pk autoincr"` OwnerID int64 `xorm:"UNIQUE(s)"` Owner *User `xorm:"-"` LowerName string `xorm:"UNIQUE(s) INDEX NOT NULL"` Name string `xorm:"INDEX NOT NULL"` Description string Website string DefaultBranch string NumWatches int NumStars int NumForks int NumIssues int NumClosedIssues int NumOpenIssues int `xorm:"-"` NumPulls int NumClosedPulls int NumOpenPulls int `xorm:"-"` NumMilestones int `xorm:"NOT NULL DEFAULT 0"` NumClosedMilestones int `xorm:"NOT NULL DEFAULT 0"` NumOpenMilestones int `xorm:"-"` NumTags int `xorm:"-"` IsPrivate bool IsBare bool IsMirror bool *Mirror `xorm:"-"` // Advanced settings EnableWiki bool `xorm:"NOT NULL DEFAULT true"` EnableExternalWiki bool ExternalWikiURL string EnableIssues bool `xorm:"NOT NULL DEFAULT true"` EnableExternalTracker bool ExternalTrackerFormat string ExternalMetas map[string]string `xorm:"-"` EnablePulls bool `xorm:"NOT NULL DEFAULT true"` IsFork bool `xorm:"NOT NULL DEFAULT false"` ForkID int64 BaseRepo *Repository `xorm:"-"` Created time.Time `xorm:"CREATED"` Updated time.Time `xorm:"UPDATED"` } func (repo *Repository) AfterSet(colName string, _ xorm.Cell) { switch colName { case "num_closed_issues": repo.NumOpenIssues = repo.NumIssues - repo.NumClosedIssues case "num_closed_pulls": repo.NumOpenPulls = repo.NumPulls - repo.NumClosedPulls case "num_closed_milestones": repo.NumOpenMilestones = repo.NumMilestones - repo.NumClosedMilestones case "updated": repo.Updated = regulateTimeZone(repo.Updated) } } func (repo *Repository) getOwner(e Engine) (err error) { if repo.Owner != nil { return nil } repo.Owner, err = getUserByID(e, repo.OwnerID) return err } func (repo *Repository) GetOwner() error { return repo.getOwner(x) } func (repo *Repository) mustOwner(e Engine) *User { if err := repo.getOwner(e); err != nil { return &User{ Name: "error", FullName: err.Error(), } } return repo.Owner } // MustOwner always returns a valid *User object to avoid // conceptually impossible error handling. // It creates a fake object that contains error deftail // when error occurs. func (repo *Repository) MustOwner() *User { return repo.mustOwner(x) } // ComposeMetas composes a map of metas for rendering external issue tracker URL. func (repo *Repository) ComposeMetas() map[string]string { if !repo.EnableExternalTracker { return nil } else if repo.ExternalMetas == nil { repo.ExternalMetas = map[string]string{ "format": repo.ExternalTrackerFormat, "user": repo.MustOwner().Name, "repo": repo.Name, } } return repo.ExternalMetas } // GetAssignees returns all users that have write access of repository. func (repo *Repository) GetAssignees() (_ []*User, err error) { if err = repo.GetOwner(); err != nil { return nil, err } accesses := make([]*Access, 0, 10) if err = x.Where("repo_id=? AND mode>=?", repo.ID, ACCESS_MODE_WRITE).Find(&accesses); err != nil { return nil, err } users := make([]*User, 0, len(accesses)+1) // Just waste 1 unit does not matter. 
if !repo.Owner.IsOrganization() { users = append(users, repo.Owner) } var u *User for i := range accesses { u, err = GetUserByID(accesses[i].UserID) if err != nil { return nil, err } users = append(users, u) } return users, nil } // GetAssigneeByID returns the user that has write access of repository by given ID. func (repo *Repository) GetAssigneeByID(userID int64) (*User, error) { return GetAssigneeByID(repo, userID) } // GetMilestoneByID returns the milestone belongs to repository by given ID. func (repo *Repository) GetMilestoneByID(milestoneID int64) (*Milestone, error) { return GetRepoMilestoneByID(repo.ID, milestoneID) } // IssueStats returns number of open and closed repository issues by given filter mode. func (repo *Repository) IssueStats(uid int64, filterMode int, isPull bool) (int64, int64) { return GetRepoIssueStats(repo.ID, uid, filterMode, isPull) } func (repo *Repository) GetMirror() (err error) { repo.Mirror, err = GetMirror(repo.ID) return err } func (repo *Repository) GetBaseRepo() (err error) { if !repo.IsFork { return nil } repo.BaseRepo, err = GetRepositoryByID(repo.ForkID) return err } func (repo *Repository) repoPath(e Engine) string { return RepoPath(repo.mustOwner(e).Name, repo.Name) } func (repo *Repository) RepoPath() string { return repo.repoPath(x) } func (repo *Repository) GitConfigPath() string { return filepath.Join(repo.RepoPath(), "config") } func (repo *Repository) RepoLink() string { return setting.AppSubUrl + "/" + repo.MustOwner().Name + "/" + repo.Name } func (repo *Repository) ComposeCompareURL(oldCommitID, newCommitID string) string { return fmt.Sprintf("%s/%s/compare/%s...%s", repo.MustOwner().Name, repo.Name, oldCommitID, newCommitID) } func (repo *Repository) FullRepoLink() string { return setting.AppUrl + repo.MustOwner().Name + "/" + repo.Name } func (repo *Repository) HasAccess(u *User) bool { has, _ := HasAccess(u, repo, ACCESS_MODE_READ) return has } func (repo *Repository) IsOwnedBy(userID int64) bool { return repo.OwnerID == userID } // CanBeForked returns true if repository meets the requirements of being forked. func (repo *Repository) CanBeForked() bool { return !repo.IsBare && !repo.IsMirror } func (repo *Repository) NextIssueIndex() int64 { return int64(repo.NumIssues+repo.NumPulls) + 1 } var ( DescPattern = regexp.MustCompile(`https?://\S+`) ) // DescriptionHtml does special handles to description and return HTML string. func (repo *Repository) DescriptionHtml() template.HTML { sanitize := func(s string) string { return fmt.Sprintf(`<a href="%[1]s" target="_blank">%[1]s</a>`, s) } return template.HTML(DescPattern.ReplaceAllStringFunc(base.Sanitizer.Sanitize(repo.Description), sanitize)) } func (repo *Repository) LocalCopyPath() string { return path.Join(setting.AppDataPath, "tmp/local", com.ToStr(repo.ID)) } func updateLocalCopy(repoPath, localPath string) error { if !com.IsExist(localPath) { if err := git.Clone(repoPath, localPath, git.CloneRepoOptions{}); err != nil { return fmt.Errorf("Clone: %v", err) } } else { if err := git.Pull(localPath, true); err != nil { return fmt.Errorf("Pull: %v", err) } } return nil } // UpdateLocalCopy makes sure the local copy of repository is up-to-date. func (repo *Repository) UpdateLocalCopy() error { return updateLocalCopy(repo.RepoPath(), repo.LocalCopyPath()) } // PatchPath returns corresponding patch file path of repository by given issue ID. 
func (repo *Repository) PatchPath(index int64) (string, error) { if err := repo.GetOwner(); err != nil { return "", err } return filepath.Join(RepoPath(repo.Owner.Name, repo.Name), "pulls", com.ToStr(index)+".patch"), nil } // SavePatch saves patch data to corresponding location by given issue ID. func (repo *Repository) SavePatch(index int64, patch []byte) error { patchPath, err := repo.PatchPath(index) if err != nil { return fmt.Errorf("PatchPath: %v", err) } os.MkdirAll(filepath.Dir(patchPath), os.ModePerm) if err = ioutil.WriteFile(patchPath, patch, 0644); err != nil { return fmt.Errorf("WriteFile: %v", err) } return nil } // ComposePayload composes and returns *api.PayloadRepo corresponding to the repository. func (repo *Repository) ComposePayload() *api.PayloadRepo { cl := repo.CloneLink() return &api.PayloadRepo{ ID: repo.ID, Name: repo.Name, URL: repo.FullRepoLink(), SSHURL: cl.SSH, CloneURL: cl.HTTPS, Description: repo.Description, Website: repo.Website, Watchers: repo.NumWatches, Owner: &api.PayloadAuthor{ Name: repo.MustOwner().DisplayName(), Email: repo.MustOwner().Email, UserName: repo.MustOwner().Name, }, Private: repo.IsPrivate, } } func isRepositoryExist(e Engine, u *User, repoName string) (bool, error) { has, err := e.Get(&Repository{ OwnerID: u.Id, LowerName: strings.ToLower(repoName), }) return has && com.IsDir(RepoPath(u.Name, repoName)), err } // IsRepositoryExist returns true if the repository with given name under user has already existed. func IsRepositoryExist(u *User, repoName string) (bool, error) { return isRepositoryExist(x, u, repoName) } // CloneLink represents different types of clone URLs of repository. type CloneLink struct { SSH string HTTPS string Git string } func (repo *Repository) cloneLink(isWiki bool) *CloneLink { repoName := repo.Name if isWiki { repoName += ".wiki" } repo.Owner = repo.MustOwner() cl := new(CloneLink) if setting.SSHPort != 22 { cl.SSH = fmt.Sprintf("ssh://%s@%s:%d/%s/%s.git", setting.RunUser, setting.SSHDomain, setting.SSHPort, repo.Owner.Name, repoName) } else { cl.SSH = fmt.Sprintf("%s@%s:%s/%s.git", setting.RunUser, setting.SSHDomain, repo.Owner.Name, repoName) } cl.HTTPS = fmt.Sprintf("%s%s/%s.git", setting.AppUrl, repo.Owner.Name, repoName) return cl } // CloneLink returns clone URLs of repository. func (repo *Repository) CloneLink() (cl *CloneLink) { return repo.cloneLink(false) } var ( reservedNames = []string{"debug", "raw", "install", "api", "avatar", "user", "org", "help", "stars", "issues", "pulls", "commits", "repo", "template", "admin", "new"} reservedPatterns = []string{"*.git", "*.keys", "*.wiki"} ) // IsUsableName checks if name is reserved or pattern of name is not allowed. func IsUsableName(name string) error { name = strings.TrimSpace(strings.ToLower(name)) if utf8.RuneCountInString(name) == 0 { return ErrNameEmpty } for i := range reservedNames { if name == reservedNames[i] { return ErrNameReserved{name} } } for _, pat := range reservedPatterns { if pat[0] == '*' && strings.HasSuffix(name, pat[1:]) || (pat[len(pat)-1] == '*' && strings.HasPrefix(name, pat[:len(pat)-1])) { return ErrNamePatternNotAllowed{pat} } } return nil } // Mirror represents a mirror information of repository. type Mirror struct { ID int64 `xorm:"pk autoincr"` RepoID int64 Repo *Repository `xorm:"-"` Interval int // Hour. 
Updated time.Time `xorm:"UPDATED"` NextUpdate time.Time address string `xorm:"-"` } func (m *Mirror) AfterSet(colName string, _ xorm.Cell) { var err error switch colName { case "repo_id": m.Repo, err = GetRepositoryByID(m.RepoID) if err != nil { log.Error(3, "GetRepositoryByID[%d]: %v", m.ID, err) } } } func (m *Mirror) readAddress() { if len(m.address) > 0 { return } cfg, err := ini.Load(m.Repo.GitConfigPath()) if err != nil { log.Error(4, "Load: %v", err) return } m.address = cfg.Section("remote \"origin\"").Key("url").Value() } // HandleCloneUserCredentials replaces user credentials from HTTP/HTTPS URL // with placeholder <credentials>. // It will fail for any other forms of clone addresses. func HandleCloneUserCredentials(url string, mosaics bool) string { i := strings.Index(url, "@") if i == -1 { return url } start := strings.Index(url, "://") if start == -1 { return url } if mosaics { return url[:start+3] + "<credentials>" + url[i:] } return url[:start+3] + url[i+1:] } // Address returns mirror address from Git repository config without credentials. func (m *Mirror) Address() string { m.readAddress() return HandleCloneUserCredentials(m.address, false) } // FullAddress returns mirror address from Git repository config. func (m *Mirror) FullAddress() string { m.readAddress() return m.address } // SaveAddress writes new address to Git repository config. func (m *Mirror) SaveAddress(addr string) error { configPath := m.Repo.GitConfigPath() cfg, err := ini.Load(configPath) if err != nil { return fmt.Errorf("Load: %v", err) } cfg.Section("remote \"origin\"").Key("url").SetValue(addr) return cfg.SaveToIndent(configPath, "\t") } func getMirror(e Engine, repoId int64) (*Mirror, error) { m := &Mirror{RepoID: repoId} has, err := e.Get(m) if err != nil { return nil, err } else if !has { return nil, ErrMirrorNotExist } return m, nil } // GetMirror returns mirror object by given repository ID. func GetMirror(repoId int64) (*Mirror, error) { return getMirror(x, repoId) } func updateMirror(e Engine, m *Mirror) error { _, err := e.Id(m.ID).Update(m) return err } func UpdateMirror(m *Mirror) error { return updateMirror(x, m) } func createUpdateHook(repoPath string) error { return git.SetUpdateHook(repoPath, fmt.Sprintf(_TPL_UPDATE_HOOK, setting.ScriptType, "\""+setting.AppPath+"\"", setting.CustomConf)) } type MigrateRepoOptions struct { Name string Description string IsPrivate bool IsMirror bool RemoteAddr string } // MigrateRepository migrates an existing repository from another project hosting service. func MigrateRepository(u *User, opts MigrateRepoOptions) (*Repository, error) { repo, err := CreateRepository(u, CreateRepoOptions{ Name: opts.Name, Description: opts.Description, IsPrivate: opts.IsPrivate, IsMirror: opts.IsMirror, }) if err != nil { return nil, err } // Clone to temporary path and do the init commit. 
tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("%d", time.Now().Nanosecond())) os.MkdirAll(tmpDir, os.ModePerm) repoPath := RepoPath(u.Name, opts.Name) if u.IsOrganization() { t, err := u.GetOwnerTeam() if err != nil { return nil, err } repo.NumWatches = t.NumMembers } else { repo.NumWatches = 1 } os.RemoveAll(repoPath) if err = git.Clone(opts.RemoteAddr, repoPath, git.CloneRepoOptions{ Mirror: true, Quiet: true, Timeout: 10 * time.Minute, }); err != nil { return repo, fmt.Errorf("Clone: %v", err) } if opts.IsMirror { if _, err = x.InsertOne(&Mirror{ RepoID: repo.ID, Interval: 24, NextUpdate: time.Now().Add(24 * time.Hour), }); err != nil { return repo, fmt.Errorf("InsertOne: %v", err) } repo.IsMirror = true return repo, UpdateRepository(repo, false) } if err = createUpdateHook(repoPath); err != nil { return repo, fmt.Errorf("createUpdateHook: %v", err) } // Clean up mirror info which prevents "push --all". // This also removes possible user credentials. configPath := repo.GitConfigPath() cfg, err := ini.Load(configPath) if err != nil { return repo, fmt.Errorf("open config file: %v", err) } cfg.DeleteSection("remote \"origin\"") if err = cfg.SaveToIndent(configPath, "\t"); err != nil { return repo, fmt.Errorf("save config file: %v", err) } // Check if repository is empty. _, stderr, err := com.ExecCmdDir(repoPath, "git", "log", "-1") if err != nil { if strings.Contains(stderr, "fatal: bad default revision 'HEAD'") { repo.IsBare = true } else { return repo, fmt.Errorf("check bare: %v - %s", err, stderr) } } // Try to get HEAD branch and set it as default branch. gitRepo, err := git.OpenRepository(repoPath) if err != nil { log.Error(4, "OpenRepository: %v", err) return repo, nil } headBranch, err := gitRepo.GetHEADBranch() if err != nil { log.Error(4, "GetHEADBranch: %v", err) return repo, nil } if headBranch != nil { repo.DefaultBranch = headBranch.Name } return repo, UpdateRepository(repo, false) } // initRepoCommit temporarily changes the work directory. func initRepoCommit(tmpPath string, sig *git.Signature) (err error) { var stderr string if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git add): %s", tmpPath), "git", "add", "--all"); err != nil { return fmt.Errorf("git add: %s", stderr) } if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git commit): %s", tmpPath), "git", "commit", fmt.Sprintf("--author='%s <%s>'", sig.Name, sig.Email), "-m", "initial commit"); err != nil { return fmt.Errorf("git commit: %s", stderr) } if _, stderr, err = process.ExecDir(-1, tmpPath, fmt.Sprintf("initRepoCommit (git push): %s", tmpPath), "git", "push", "origin", "master"); err != nil { return fmt.Errorf("git push: %s", stderr) } return nil } type CreateRepoOptions struct { Name string Description string Gitignores string License string Readme string IsPrivate bool IsMirror bool AutoInit bool } func getRepoInitFile(tp, name string) ([]byte, error) { relPath := path.Join("conf", tp, name) // Use custom file when available. customPath := path.Join(setting.CustomPath, relPath) if com.IsFile(customPath) { return ioutil.ReadFile(customPath) } return bindata.Asset(relPath) } func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error { // Clone to temporary path and do the init commit. 
_, stderr, err := process.Exec( fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir) if err != nil { return fmt.Errorf("git clone: %v - %s", err, stderr) } // README data, err := getRepoInitFile("readme", opts.Readme) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", opts.Readme, err) } cloneLink := repo.CloneLink() match := map[string]string{ "Name": repo.Name, "Description": repo.Description, "CloneURL.SSH": cloneLink.SSH, "CloneURL.HTTPS": cloneLink.HTTPS, } if err = ioutil.WriteFile(filepath.Join(tmpDir, "README.md"), []byte(com.Expand(string(data), match)), 0644); err != nil { return fmt.Errorf("write README.md: %v", err) } // .gitignore if len(opts.Gitignores) > 0 { var buf bytes.Buffer names := strings.Split(opts.Gitignores, ",") for _, name := range names { data, err = getRepoInitFile("gitignore", name) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", name, err) } buf.WriteString("# ---> " + name + "\n") buf.Write(data) buf.WriteString("\n") } if buf.Len() > 0 { if err = ioutil.WriteFile(filepath.Join(tmpDir, ".gitignore"), buf.Bytes(), 0644); err != nil { return fmt.Errorf("write .gitignore: %v", err) } } } // LICENSE if len(opts.License) > 0 { data, err = getRepoInitFile("license", opts.License) if err != nil { return fmt.Errorf("getRepoInitFile[%s]: %v", opts.License, err) } if err = ioutil.WriteFile(filepath.Join(tmpDir, "LICENSE"), data, 0644); err != nil { return fmt.Errorf("write LICENSE: %v", err) } } return nil } // InitRepository initializes README and .gitignore if needed. func initRepository(e Engine, repoPath string, u *User, repo *Repository, opts CreateRepoOptions) (err error) { // Somehow the directory could exist. if com.IsExist(repoPath) { return fmt.Errorf("initRepository: path already exists: %s", repoPath) } // Init bare new repository. if err = git.InitRepository(repoPath, true); err != nil { return fmt.Errorf("InitRepository: %v", err) } else if err = createUpdateHook(repoPath); err != nil { return fmt.Errorf("createUpdateHook: %v", err) } tmpDir := filepath.Join(os.TempDir(), "gogs-"+repo.Name+"-"+com.ToStr(time.Now().Nanosecond())) // Initialize repository according to user's choice. if opts.AutoInit { os.MkdirAll(tmpDir, os.ModePerm) defer os.RemoveAll(tmpDir) if err = prepareRepoCommit(repo, tmpDir, repoPath, opts); err != nil { return fmt.Errorf("prepareRepoCommit: %v", err) } // Apply changes and commit. if err = initRepoCommit(tmpDir, u.NewGitSig()); err != nil { return fmt.Errorf("initRepoCommit: %v", err) } } // Re-fetch the repository from database before updating it (else it would // override changes that were done earlier with sql) if repo, err = getRepositoryByID(e, repo.ID); err != nil { return fmt.Errorf("getRepositoryByID: %v", err) } if !opts.AutoInit { repo.IsBare = true } repo.DefaultBranch = "master" if err = updateRepository(e, repo, false); err != nil { return fmt.Errorf("updateRepository: %v", err) } return nil } func createRepository(e *xorm.Session, u *User, repo *Repository) (err error) { if err = IsUsableName(repo.Name); err != nil { return err } has, err := isRepositoryExist(e, u, repo.Name) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{u.Name, repo.Name} } if _, err = e.Insert(repo); err != nil { return err } u.NumRepos++ // Remember visibility preference. 
u.LastRepoVisibility = repo.IsPrivate if err = updateUser(e, u); err != nil { return fmt.Errorf("updateUser: %v", err) } // Give access to all members in owner team. if u.IsOrganization() { t, err := u.getOwnerTeam(e) if err != nil { return fmt.Errorf("getOwnerTeam: %v", err) } else if err = t.addRepository(e, repo); err != nil { return fmt.Errorf("addRepository: %v", err) } } else { // Organization automatically called this in addRepository method. if err = repo.recalculateAccesses(e); err != nil { return fmt.Errorf("recalculateAccesses: %v", err) } } if err = watchRepo(e, u.Id, repo.ID, true); err != nil { return fmt.Errorf("watchRepo: %v", err) } else if err = newRepoAction(e, u, repo); err != nil { return fmt.Errorf("newRepoAction: %v", err) } return nil } // CreateRepository creates a repository for given user or organization. func CreateRepository(u *User, opts CreateRepoOptions) (_ *Repository, err error) { if !u.CanCreateRepo() { return nil, ErrReachLimitOfRepo{u.MaxRepoCreation} } repo := &Repository{ OwnerID: u.Id, Owner: u, Name: opts.Name, LowerName: strings.ToLower(opts.Name), Description: opts.Description, IsPrivate: opts.IsPrivate, EnableWiki: true, EnableIssues: true, EnablePulls: true, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return nil, err } if err = createRepository(sess, u, repo); err != nil { return nil, err } // No need for init mirror. if !opts.IsMirror { repoPath := RepoPath(u.Name, repo.Name) if err = initRepository(sess, repoPath, u, repo, opts); err != nil { if err2 := os.RemoveAll(repoPath); err2 != nil { log.Error(4, "initRepository: %v", err) return nil, fmt.Errorf( "delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2) } return nil, fmt.Errorf("initRepository: %v", err) } _, stderr, err := process.ExecDir(-1, repoPath, fmt.Sprintf("CreateRepository(git update-server-info): %s", repoPath), "git", "update-server-info") if err != nil { return nil, errors.New("CreateRepository(git update-server-info): " + stderr) } } return repo, sess.Commit() } func countRepositories(showPrivate bool) int64 { sess := x.NewSession() if !showPrivate { sess.Where("is_private=?", false) } count, err := sess.Count(new(Repository)) if err != nil { log.Error(4, "countRepositories: %v", err) } return count } // CountRepositories returns number of repositories. func CountRepositories() int64 { return countRepositories(true) } // CountPublicRepositories returns number of public repositories. func CountPublicRepositories() int64 { return countRepositories(false) } // RepositoriesWithUsers returns number of repos in given page. func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) { repos := make([]*Repository, 0, pageSize) if err = x.Limit(pageSize, (page-1)*pageSize).Asc("id").Find(&repos); err != nil { return nil, err } for i := range repos { if err = repos[i].GetOwner(); err != nil { return nil, err } } return repos, nil } // RepoPath returns repository path by given user and repository name. func RepoPath(userName, repoName string) string { return filepath.Join(UserPath(userName), strings.ToLower(repoName)+".git") } // TransferOwnership transfers all corresponding setting from old user to new one. func TransferOwnership(u *User, newOwnerName string, repo *Repository) error { newOwner, err := GetUserByName(newOwnerName) if err != nil { return fmt.Errorf("get new owner '%s': %v", newOwnerName, err) } // Check if new owner has repository with same name. 
has, err := IsRepositoryExist(newOwner, repo.Name) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{newOwnerName, repo.Name} } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return fmt.Errorf("sess.Begin: %v", err) } owner := repo.Owner // Note: we have to set value here to make sure recalculate accesses is based on // new owner. repo.OwnerID = newOwner.Id repo.Owner = newOwner // Update repository. if _, err := sess.Id(repo.ID).Update(repo); err != nil { return fmt.Errorf("update owner: %v", err) } // Remove redundant collaborators. collaborators, err := repo.getCollaborators(sess) if err != nil { return fmt.Errorf("getCollaborators: %v", err) } // Dummy object. collaboration := &Collaboration{RepoID: repo.ID} for _, c := range collaborators { collaboration.UserID = c.Id if c.Id == newOwner.Id || newOwner.IsOrgMember(c.Id) { if _, err = sess.Delete(collaboration); err != nil { return fmt.Errorf("remove collaborator '%d': %v", c.Id, err) } } } // Remove old team-repository relations. if owner.IsOrganization() { if err = owner.getTeams(sess); err != nil { return fmt.Errorf("getTeams: %v", err) } for _, t := range owner.Teams { if !t.hasRepository(sess, repo.ID) { continue } t.NumRepos-- if _, err := sess.Id(t.ID).AllCols().Update(t); err != nil { return fmt.Errorf("decrease team repository count '%d': %v", t.ID, err) } } if err = owner.removeOrgRepo(sess, repo.ID); err != nil { return fmt.Errorf("removeOrgRepo: %v", err) } } if newOwner.IsOrganization() { t, err := newOwner.getOwnerTeam(sess) if err != nil { return fmt.Errorf("getOwnerTeam: %v", err) } else if err = t.addRepository(sess, repo); err != nil { return fmt.Errorf("add to owner team: %v", err) } } else { // Organization called this in addRepository method. if err = repo.recalculateAccesses(sess); err != nil { return fmt.Errorf("recalculateAccesses: %v", err) } } // Update repository count. if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos+1 WHERE id=?", newOwner.Id); err != nil { return fmt.Errorf("increase new owner repository count: %v", err) } else if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", owner.Id); err != nil { return fmt.Errorf("decrease old owner repository count: %v", err) } if err = watchRepo(sess, newOwner.Id, repo.ID, true); err != nil { return fmt.Errorf("watchRepo: %v", err) } else if err = transferRepoAction(sess, u, owner, newOwner, repo); err != nil { return fmt.Errorf("transferRepoAction: %v", err) } // Change repository directory name. if err = os.Rename(RepoPath(owner.Name, repo.Name), RepoPath(newOwner.Name, repo.Name)); err != nil { return fmt.Errorf("rename repository directory: %v", err) } wikiPath := WikiPath(owner.Name, repo.Name) if com.IsExist(wikiPath) { RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath()) if err = os.Rename(wikiPath, WikiPath(newOwner.Name, repo.Name)); err != nil { return fmt.Errorf("rename repository wiki: %v", err) } } return sess.Commit() } // ChangeRepositoryName changes all corresponding setting from old repository name to new one. 
func ChangeRepositoryName(u *User, oldRepoName, newRepoName string) (err error) { oldRepoName = strings.ToLower(oldRepoName) newRepoName = strings.ToLower(newRepoName) if err = IsUsableName(newRepoName); err != nil { return err } has, err := IsRepositoryExist(u, newRepoName) if err != nil { return fmt.Errorf("IsRepositoryExist: %v", err) } else if has { return ErrRepoAlreadyExist{u.Name, newRepoName} } repo, err := GetRepositoryByName(u.Id, oldRepoName) if err != nil { return fmt.Errorf("GetRepositoryByName: %v", err) } // Change repository directory name. if err = os.Rename(repo.RepoPath(), RepoPath(u.Name, newRepoName)); err != nil { return fmt.Errorf("rename repository directory: %v", err) } wikiPath := repo.WikiPath() if com.IsExist(wikiPath) { if err = os.Rename(wikiPath, WikiPath(u.Name, newRepoName)); err != nil { return fmt.Errorf("rename repository wiki: %v", err) } RemoveAllWithNotice("Delete repository wiki local copy", repo.LocalWikiPath()) } return nil } func getRepositoriesByForkID(e Engine, forkID int64) ([]*Repository, error) { repos := make([]*Repository, 0, 10) return repos, e.Where("fork_id=?", forkID).Find(&repos) } // GetRepositoriesByForkID returns all repositories with given fork ID. func GetRepositoriesByForkID(forkID int64) ([]*Repository, error) { return getRepositoriesByForkID(x, forkID) } func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err error) { repo.LowerName = strings.ToLower(repo.Name) if len(repo.Description) > 255 { repo.Description = repo.Description[:255] } if len(repo.Website) > 255 { repo.Website = repo.Website[:255] } if _, err = e.Id(repo.ID).AllCols().Update(repo); err != nil { return fmt.Errorf("update: %v", err) } if visibilityChanged { if err = repo.getOwner(e); err != nil { return fmt.Errorf("getOwner: %v", err) } if repo.Owner.IsOrganization() { // Organization repository needs to recalculate access table when visibility is changed. if err = repo.recalculateTeamAccesses(e, 0); err != nil { return fmt.Errorf("recalculateTeamAccesses: %v", err) } } forkRepos, err := getRepositoriesByForkID(e, repo.ID) if err != nil { return fmt.Errorf("getRepositoriesByForkID: %v", err) } for i := range forkRepos { forkRepos[i].IsPrivate = repo.IsPrivate if err = updateRepository(e, forkRepos[i], true); err != nil { return fmt.Errorf("updateRepository[%d]: %v", forkRepos[i].ID, err) } } } return nil } func UpdateRepository(repo *Repository, visibilityChanged bool) (err error) { sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if err = updateRepository(x, repo, visibilityChanged); err != nil { return fmt.Errorf("updateRepository: %v", err) } return sess.Commit() } // DeleteRepository deletes a repository for a user or organization. func DeleteRepository(uid, repoID int64) error { repo := &Repository{ID: repoID, OwnerID: uid} has, err := x.Get(repo) if err != nil { return err } else if !has { return ErrRepoNotExist{repoID, uid, ""} } // In case it is an organization. 
org, err := GetUserByID(uid) if err != nil { return err } if org.IsOrganization() { if err = org.GetTeams(); err != nil { return err } } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if org.IsOrganization() { for _, t := range org.Teams { if !t.hasRepository(sess, repoID) { continue } else if err = t.removeRepository(sess, repo, false); err != nil { return err } } } if err = deleteBeans(sess, &Repository{ID: repoID}, &Access{RepoID: repo.ID}, &Action{RepoID: repo.ID}, &Watch{RepoID: repoID}, &Star{RepoID: repoID}, &Mirror{RepoID: repoID}, &IssueUser{RepoID: repoID}, &Milestone{RepoID: repoID}, &Release{RepoID: repoID}, &Collaboration{RepoID: repoID}, &PullRequest{BaseRepoID: repoID}, ); err != nil { return fmt.Errorf("deleteBeans: %v", err) } // Delete comments and attachments. issues := make([]*Issue, 0, 25) attachmentPaths := make([]string, 0, len(issues)) if err = sess.Where("repo_id=?", repoID).Find(&issues); err != nil { return err } for i := range issues { if _, err = sess.Delete(&Comment{IssueID: issues[i].ID}); err != nil { return err } attachments := make([]*Attachment, 0, 5) if err = sess.Where("issue_id=?", issues[i].ID).Find(&attachments); err != nil { return err } for j := range attachments { attachmentPaths = append(attachmentPaths, attachments[j].LocalPath()) } if _, err = sess.Delete(&Attachment{IssueID: issues[i].ID}); err != nil { return err } } if _, err = sess.Delete(&Issue{RepoID: repoID}); err != nil { return err } if repo.IsFork { if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks-1 WHERE id=?", repo.ForkID); err != nil { return fmt.Errorf("decrease fork count: %v", err) } } if _, err = sess.Exec("UPDATE `user` SET num_repos=num_repos-1 WHERE id=?", uid); err != nil { return err } // Remove repository files. repoPath := repo.repoPath(sess) RemoveAllWithNotice("Delete repository files", repoPath) wikiPaths := []string{repo.WikiPath(), repo.LocalWikiPath()} for _, wikiPath := range wikiPaths { RemoveAllWithNotice("Delete repository wiki", wikiPath) } // Remove attachment files. for i := range attachmentPaths { RemoveAllWithNotice("Delete attachment", attachmentPaths[i]) } if err = sess.Commit(); err != nil { return fmt.Errorf("Commit: %v", err) } if repo.NumForks > 0 { if repo.IsPrivate { forkRepos, err := GetRepositoriesByForkID(repo.ID) if err != nil { return fmt.Errorf("getRepositoriesByForkID: %v", err) } for i := range forkRepos { if err = DeleteRepository(forkRepos[i].OwnerID, forkRepos[i].ID); err != nil { log.Error(4, "DeleteRepository [%d]: %v", forkRepos[i].ID, err) } } } else { if _, err = x.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil { log.Error(4, "reset 'fork_id' and 'is_fork': %v", err) } } } return nil } // GetRepositoryByRef returns a Repository specified by a GFM reference. // See https://help.github.com/articles/writing-on-github#references for more information on the syntax. func GetRepositoryByRef(ref string) (*Repository, error) { n := strings.IndexByte(ref, byte('/')) if n < 2 { return nil, ErrInvalidReference } userName, repoName := ref[:n], ref[n+1:] user, err := GetUserByName(userName) if err != nil { return nil, err } return GetRepositoryByName(user.Id, repoName) } // GetRepositoryByName returns the repository by given name under user if exists. 
func GetRepositoryByName(uid int64, repoName string) (*Repository, error) { repo := &Repository{ OwnerID: uid, LowerName: strings.ToLower(repoName), } has, err := x.Get(repo) if err != nil { return nil, err } else if !has { return nil, ErrRepoNotExist{0, uid, repoName} } return repo, err } func getRepositoryByID(e Engine, id int64) (*Repository, error) { repo := new(Repository) has, err := e.Id(id).Get(repo) if err != nil { return nil, err } else if !has { return nil, ErrRepoNotExist{id, 0, ""} } return repo, nil } // GetRepositoryByID returns the repository by given id if exists. func GetRepositoryByID(id int64) (*Repository, error) { return getRepositoryByID(x, id) } // GetRepositories returns a list of repositories of given user. func GetRepositories(uid int64, private bool) ([]*Repository, error) { repos := make([]*Repository, 0, 10) sess := x.Desc("updated") if !private { sess.Where("is_private=?", false) } return repos, sess.Find(&repos, &Repository{OwnerID: uid}) } // GetRecentUpdatedRepositories returns the list of repositories that are recently updated. func GetRecentUpdatedRepositories(page int) (repos []*Repository, err error) { return repos, x.Limit(setting.ExplorePagingNum, (page-1)*setting.ExplorePagingNum). Where("is_private=?", false).Limit(setting.ExplorePagingNum).Desc("updated").Find(&repos) } func getRepositoryCount(e Engine, u *User) (int64, error) { return x.Count(&Repository{OwnerID: u.Id}) } // GetRepositoryCount returns the total number of repositories of user. func GetRepositoryCount(u *User) (int64, error) { return getRepositoryCount(x, u) } type SearchOption struct { Keyword string Uid int64 Limit int Private bool } // SearchRepositoryByName returns given number of repositories whose name contains keyword. func SearchRepositoryByName(opt SearchOption) (repos []*Repository, err error) { if len(opt.Keyword) == 0 { return repos, nil } opt.Keyword = strings.ToLower(opt.Keyword) repos = make([]*Repository, 0, opt.Limit) // Append conditions. sess := x.Limit(opt.Limit) if opt.Uid > 0 { sess.Where("owner_id=?", opt.Uid) } if !opt.Private { sess.And("is_private=?", false) } sess.And("lower_name like ?", "%"+opt.Keyword+"%").Find(&repos) return repos, err } // DeleteRepositoryArchives deletes all repositories' archives. func DeleteRepositoryArchives() error { return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) return os.RemoveAll(filepath.Join(repo.RepoPath(), "archives")) }) } func gatherMissingRepoRecords() ([]*Repository, error) { repos := make([]*Repository, 0, 10) if err := x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) if !com.IsDir(repo.RepoPath()) { repos = append(repos, repo) } return nil }); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("gatherMissingRepoRecords: %v", err)); err2 != nil { return nil, fmt.Errorf("CreateRepositoryNotice: %v", err) } } return repos, nil } // DeleteMissingRepositories deletes all repository records that lost Git files. 
func DeleteMissingRepositories() error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Deleting %d/%d...", repo.OwnerID, repo.ID) if err := DeleteRepository(repo.OwnerID, repo.ID); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("DeleteRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil } // ReinitMissingRepositories reinitializes all repository records that lost Git files. func ReinitMissingRepositories() error { repos, err := gatherMissingRepoRecords() if err != nil { return fmt.Errorf("gatherMissingRepoRecords: %v", err) } if len(repos) == 0 { return nil } for _, repo := range repos { log.Trace("Initializing %d/%d...", repo.OwnerID, repo.ID) if err := git.InitRepository(repo.RepoPath(), true); err != nil { if err2 := CreateRepositoryNotice(fmt.Sprintf("InitRepository [%d]: %v", repo.ID, err)); err2 != nil { return fmt.Errorf("CreateRepositoryNotice: %v", err) } } } return nil } // RewriteRepositoryUpdateHook rewrites all repositories' update hook. func RewriteRepositoryUpdateHook() error { return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) return createUpdateHook(repo.RepoPath()) }) } // statusPool represents a pool of status with true/false. type statusPool struct { lock sync.RWMutex pool map[string]bool } // Start sets value of given name to true in the pool. func (p *statusPool) Start(name string) { p.lock.Lock() defer p.lock.Unlock() p.pool[name] = true } // Stop sets value of given name to false in the pool. func (p *statusPool) Stop(name string) { p.lock.Lock() defer p.lock.Unlock() p.pool[name] = false } // IsRunning checks if value of given name is set to true in the pool. func (p *statusPool) IsRunning(name string) bool { p.lock.RLock() defer p.lock.RUnlock() return p.pool[name] } // Prevent duplicate running tasks. var taskStatusPool = &statusPool{ pool: make(map[string]bool), } const ( _MIRROR_UPDATE = "mirror_update" _GIT_FSCK = "git_fsck" _CHECK_REPOs = "check_repos" ) // MirrorUpdate checks and updates mirror repositories. func MirrorUpdate() { if taskStatusPool.IsRunning(_MIRROR_UPDATE) { return } taskStatusPool.Start(_MIRROR_UPDATE) defer taskStatusPool.Stop(_MIRROR_UPDATE) log.Trace("Doing: MirrorUpdate") mirrors := make([]*Mirror, 0, 10) if err := x.Iterate(new(Mirror), func(idx int, bean interface{}) error { m := bean.(*Mirror) if m.NextUpdate.After(time.Now()) { return nil } if m.Repo == nil { log.Error(4, "Disconnected mirror repository found: %d", m.ID) return nil } repoPath := m.Repo.RepoPath() if _, stderr, err := process.ExecDir(10*time.Minute, repoPath, fmt.Sprintf("MirrorUpdate: %s", repoPath), "git", "remote", "update", "--prune"); err != nil { desc := fmt.Sprintf("Fail to update mirror repository(%s): %s", repoPath, stderr) log.Error(4, desc) if err = CreateRepositoryNotice(desc); err != nil { log.Error(4, "CreateRepositoryNotice: %v", err) } return nil } m.NextUpdate = time.Now().Add(time.Duration(m.Interval) * time.Hour) mirrors = append(mirrors, m) return nil }); err != nil { log.Error(4, "MirrorUpdate: %v", err) } for i := range mirrors { if err := UpdateMirror(mirrors[i]); err != nil { log.Error(4, "UpdateMirror[%d]: %v", mirrors[i].ID, err) } } } // GitFsck calls 'git fsck' to check repository health. 
func GitFsck() { if taskStatusPool.IsRunning(_GIT_FSCK) { return } taskStatusPool.Start(_GIT_FSCK) defer taskStatusPool.Stop(_GIT_FSCK) log.Trace("Doing: GitFsck") if err := x.Where("id>0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) repoPath := repo.RepoPath() if err := git.Fsck(repoPath, setting.Cron.RepoHealthCheck.Timeout, setting.Cron.RepoHealthCheck.Args...); err != nil { desc := fmt.Sprintf("Fail to health check repository (%s): %v", repoPath, err) log.Warn(desc) if err = CreateRepositoryNotice(desc); err != nil { log.Error(4, "CreateRepositoryNotice: %v", err) } } return nil }); err != nil { log.Error(4, "GitFsck: %v", err) } } func GitGcRepos() error { args := append([]string{"gc"}, setting.Git.GcArgs...) return x.Where("id > 0").Iterate(new(Repository), func(idx int, bean interface{}) error { repo := bean.(*Repository) if err := repo.GetOwner(); err != nil { return err } _, stderr, err := process.ExecDir(-1, RepoPath(repo.Owner.Name, repo.Name), "Repository garbage collection", "git", args...) if err != nil { return fmt.Errorf("%v: %v", err, stderr) } return nil }) } type repoChecker struct { querySQL, correctSQL string desc string } func repoStatsCheck(checker *repoChecker) { results, err := x.Query(checker.querySQL) if err != nil { log.Error(4, "Select %s: %v", checker.desc, err) return } for _, result := range results { id := com.StrTo(result["id"]).MustInt64() log.Trace("Updating %s: %d", checker.desc, id) _, err = x.Exec(checker.correctSQL, id, id) if err != nil { log.Error(4, "Update %s[%d]: %v", checker.desc, id, err) } } } func CheckRepoStats() { if taskStatusPool.IsRunning(_CHECK_REPOs) { return } taskStatusPool.Start(_CHECK_REPOs) defer taskStatusPool.Stop(_CHECK_REPOs) log.Trace("Doing: CheckRepoStats") checkers := []*repoChecker{ // Repository.NumWatches { "SELECT repo.id FROM `repository` repo WHERE repo.num_watches!=(SELECT COUNT(*) FROM `watch` WHERE repo_id=repo.id)", "UPDATE `repository` SET num_watches=(SELECT COUNT(*) FROM `watch` WHERE repo_id=?) WHERE id=?", "repository count 'num_watches'", }, // Repository.NumStars { "SELECT repo.id FROM `repository` repo WHERE repo.num_stars!=(SELECT COUNT(*) FROM `star` WHERE repo_id=repo.id)", "UPDATE `repository` SET num_stars=(SELECT COUNT(*) FROM `star` WHERE repo_id=?) WHERE id=?", "repository count 'num_stars'", }, // Label.NumIssues { "SELECT label.id FROM `label` WHERE label.num_issues!=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=label.id)", "UPDATE `label` SET num_issues=(SELECT COUNT(*) FROM `issue_label` WHERE label_id=?) WHERE id=?", "label count 'num_issues'", }, // User.NumRepos { "SELECT `user`.id FROM `user` WHERE `user`.num_repos!=(SELECT COUNT(*) FROM `repository` WHERE owner_id=`user`.id)", "UPDATE `user` SET num_repos=(SELECT COUNT(*) FROM `repository` WHERE owner_id=?) WHERE id=?", "user count 'num_repos'", }, // Issue.NumComments { "SELECT `issue`.id FROM `issue` WHERE `issue`.num_comments!=(SELECT COUNT(*) FROM `comment` WHERE issue_id=`issue`.id AND type=0)", "UPDATE `issue` SET num_comments=(SELECT COUNT(*) FROM `comment` WHERE issue_id=? AND type=0) WHERE id=?", "issue count 'num_comments'", }, } for i := range checkers { repoStatsCheck(checkers[i]) } // FIXME: use checker when v0.9, stop supporting old fork repo format. 
// ***** START: Repository.NumForks ***** results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)") if err != nil { log.Error(4, "Select repository count 'num_forks': %v", err) } else { for _, result := range results { id := com.StrTo(result["id"]).MustInt64() log.Trace("Updating repository count 'num_forks': %d", id) repo, err := GetRepositoryByID(id) if err != nil { log.Error(4, "GetRepositoryByID[%d]: %v", id, err) continue } rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID) if err != nil { log.Error(4, "Select count of forks[%d]: %v", repo.ID, err) continue } repo.NumForks = int(parseCountResult(rawResult)) if err = UpdateRepository(repo, false); err != nil { log.Error(4, "UpdateRepository[%d]: %v", id, err) continue } } } // ***** END: Repository.NumForks ***** } // _________ .__ .__ ___. __ .__ // \_ ___ \ ____ | | | | _____ \_ |__ ________________ _/ |_|__| ____ ____ // / \ \/ / _ \| | | | \__ \ | __ \ / _ \_ __ \__ \\ __\ |/ _ \ / \ // \ \___( <_> ) |_| |__/ __ \| \_\ ( <_> ) | \// __ \| | | ( <_> ) | \ // \______ /\____/|____/____(____ /___ /\____/|__| (____ /__| |__|\____/|___| / // \/ \/ \/ \/ \/ // A Collaboration is a relation between an individual and a repository type Collaboration struct { ID int64 `xorm:"pk autoincr"` RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` UserID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"` Created time.Time `xorm:"CREATED"` } // Add collaborator and accompanying access func (repo *Repository) AddCollaborator(u *User) error { collaboration := &Collaboration{ RepoID: repo.ID, UserID: u.Id, } has, err := x.Get(collaboration) if err != nil { return err } else if has { return nil } if err = repo.GetOwner(); err != nil { return fmt.Errorf("GetOwner: %v", err) } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if _, err = sess.InsertOne(collaboration); err != nil { return err } if repo.Owner.IsOrganization() { err = repo.recalculateTeamAccesses(sess, 0) } else { err = repo.recalculateAccesses(sess) } if err != nil { return fmt.Errorf("recalculateAccesses 'team=%v': %v", repo.Owner.IsOrganization(), err) } return sess.Commit() } func (repo *Repository) getCollaborators(e Engine) ([]*User, error) { collaborations := make([]*Collaboration, 0) if err := e.Find(&collaborations, &Collaboration{RepoID: repo.ID}); err != nil { return nil, err } users := make([]*User, len(collaborations)) for i, c := range collaborations { user, err := getUserByID(e, c.UserID) if err != nil { return nil, err } users[i] = user } return users, nil } // GetCollaborators returns the collaborators for a repository func (repo *Repository) GetCollaborators() ([]*User, error) { return repo.getCollaborators(x) } // Delete collaborator and accompanying access func (repo *Repository) DeleteCollaborator(u *User) (err error) { collaboration := &Collaboration{ RepoID: repo.ID, UserID: u.Id, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return err } if has, err := sess.Delete(collaboration); err != nil || has == 0 { return err } else if err = repo.recalculateAccesses(sess); err != nil { return err } return sess.Commit() } // __ __ __ .__ // / \ / \_____ _/ |_ ____ | |__ // \ \/\/ /\__ \\ __\/ ___\| | \ // \ / / __ \| | \ \___| Y \ // \__/\ / (____ /__| \___ >___| / // \/ \/ \/ \/ // Watch is connection request for receiving repository notification. 
type Watch struct { ID int64 `xorm:"pk autoincr"` UserID int64 `xorm:"UNIQUE(watch)"` RepoID int64 `xorm:"UNIQUE(watch)"` } func isWatching(e Engine, uid, repoId int64) bool { has, _ := e.Get(&Watch{0, uid, repoId}) return has } // IsWatching checks if user has watched given repository. func IsWatching(uid, repoId int64) bool { return isWatching(x, uid, repoId) } func watchRepo(e Engine, uid, repoId int64, watch bool) (err error) { if watch { if isWatching(e, uid, repoId) { return nil } if _, err = e.Insert(&Watch{RepoID: repoId, UserID: uid}); err != nil { return err } _, err = e.Exec("UPDATE `repository` SET num_watches = num_watches + 1 WHERE id = ?", repoId) } else { if !isWatching(e, uid, repoId) { return nil } if _, err = e.Delete(&Watch{0, uid, repoId}); err != nil { return err } _, err = e.Exec("UPDATE `repository` SET num_watches=num_watches-1 WHERE id=?", repoId) } return err } // Watch or unwatch repository. func WatchRepo(uid, repoId int64, watch bool) (err error) { return watchRepo(x, uid, repoId, watch) } func getWatchers(e Engine, repoID int64) ([]*Watch, error) { watches := make([]*Watch, 0, 10) return watches, e.Find(&watches, &Watch{RepoID: repoID}) } // GetWatchers returns all watchers of given repository. func GetWatchers(repoID int64) ([]*Watch, error) { return getWatchers(x, repoID) } // Repository.GetWatchers returns range of users watching given repository. func (repo *Repository) GetWatchers(page int) ([]*User, error) { users := make([]*User, 0, ItemsPerPage) sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("watch.repo_id=?", repo.ID) if setting.UsePostgreSQL { sess = sess.Join("LEFT", "watch", `"user".id=watch.user_id`) } else { sess = sess.Join("LEFT", "watch", "user.id=watch.user_id") } return users, sess.Find(&users) } func notifyWatchers(e Engine, act *Action) error { // Add feeds for user self and all watchers. watches, err := getWatchers(e, act.RepoID) if err != nil { return fmt.Errorf("get watchers: %v", err) } // Add feed for actioner. act.UserID = act.ActUserID if _, err = e.InsertOne(act); err != nil { return fmt.Errorf("insert new actioner: %v", err) } for i := range watches { if act.ActUserID == watches[i].UserID { continue } act.ID = 0 act.UserID = watches[i].UserID if _, err = e.InsertOne(act); err != nil { return fmt.Errorf("insert new action: %v", err) } } return nil } // NotifyWatchers creates batch of actions for every watcher. func NotifyWatchers(act *Action) error { return notifyWatchers(x, act) } // _________ __ // / _____// |______ _______ // \_____ \\ __\__ \\_ __ \ // / \| | / __ \| | \/ // /_______ /|__| (____ /__| // \/ \/ type Star struct { ID int64 `xorm:"pk autoincr"` UID int64 `xorm:"UNIQUE(s)"` RepoID int64 `xorm:"UNIQUE(s)"` } // Star or unstar repository. 
func StarRepo(uid, repoId int64, star bool) (err error) { if star { if IsStaring(uid, repoId) { return nil } if _, err = x.Insert(&Star{UID: uid, RepoID: repoId}); err != nil { return err } else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars + 1 WHERE id = ?", repoId); err != nil { return err } _, err = x.Exec("UPDATE `user` SET num_stars = num_stars + 1 WHERE id = ?", uid) } else { if !IsStaring(uid, repoId) { return nil } if _, err = x.Delete(&Star{0, uid, repoId}); err != nil { return err } else if _, err = x.Exec("UPDATE `repository` SET num_stars = num_stars - 1 WHERE id = ?", repoId); err != nil { return err } _, err = x.Exec("UPDATE `user` SET num_stars = num_stars - 1 WHERE id = ?", uid) } return err } // IsStaring checks if user has starred given repository. func IsStaring(uid, repoId int64) bool { has, _ := x.Get(&Star{0, uid, repoId}) return has } func (repo *Repository) GetStargazers(page int) ([]*User, error) { users := make([]*User, 0, ItemsPerPage) sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("star.repo_id=?", repo.ID) if setting.UsePostgreSQL { sess = sess.Join("LEFT", "star", `"user".id=star.uid`) } else { sess = sess.Join("LEFT", "star", "user.id=star.uid") } return users, sess.Find(&users) } // ___________ __ // \_ _____/__________| | __ // | __)/ _ \_ __ \ |/ / // | \( <_> ) | \/ < // \___ / \____/|__| |__|_ \ // \/ \/ // HasForkedRepo checks if given user has already forked a repository with given ID. func HasForkedRepo(ownerID, repoID int64) (*Repository, bool) { repo := new(Repository) has, _ := x.Where("owner_id=? AND fork_id=?", ownerID, repoID).Get(repo) return repo, has } func ForkRepository(u *User, oldRepo *Repository, name, desc string) (_ *Repository, err error) { repo := &Repository{ OwnerID: u.Id, Owner: u, Name: name, LowerName: strings.ToLower(name), Description: desc, DefaultBranch: oldRepo.DefaultBranch, IsPrivate: oldRepo.IsPrivate, IsFork: true, ForkID: oldRepo.ID, } sess := x.NewSession() defer sessionRelease(sess) if err = sess.Begin(); err != nil { return nil, err } if err = createRepository(sess, u, repo); err != nil { return nil, err } if _, err = sess.Exec("UPDATE `repository` SET num_forks=num_forks+1 WHERE id=?", oldRepo.ID); err != nil { return nil, err } repoPath := RepoPath(u.Name, repo.Name) _, stderr, err := process.ExecTimeout(10*time.Minute, fmt.Sprintf("ForkRepository(git clone): %s/%s", u.Name, repo.Name), "git", "clone", "--bare", oldRepo.RepoPath(), repoPath) if err != nil { return nil, fmt.Errorf("git clone: %v", stderr) } _, stderr, err = process.ExecDir(-1, repoPath, fmt.Sprintf("ForkRepository(git update-server-info): %s", repoPath), "git", "update-server-info") if err != nil { return nil, fmt.Errorf("git update-server-info: %v", err) } if err = createUpdateHook(repoPath); err != nil { return nil, fmt.Errorf("createUpdateHook: %v", err) } return repo, sess.Commit() } func (repo *Repository) GetForks() ([]*Repository, error) { forks := make([]*Repository, 0, repo.NumForks) return forks, x.Find(&forks, &Repository{ForkID: repo.ID}) }
1
10,313
Better call it `CleanUpMirrorInfo`?
gogs-gogs
go
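The `num_forks` reconciliation in the gogs file above follows a generic detect-then-repair pattern: a correlated subquery finds rows whose cached counter is stale, and each hit is recounted and rewritten individually. Below is a minimal stand-alone sketch of that pattern using only `database/sql`; the driver import, DSN, and function names are illustrative assumptions, not part of gogs.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // assumed driver; any database/sql driver works
)

// reconcileForkCounts finds repositories whose cached num_forks disagrees
// with the real number of fork rows, then recounts and rewrites each one,
// mirroring the detect-then-repair loop in the gogs snippet above.
func reconcileForkCounts(db *sql.DB) error {
	rows, err := db.Query(
		"SELECT repo.id FROM repository repo WHERE repo.num_forks != " +
			"(SELECT COUNT(*) FROM repository WHERE fork_id = repo.id)")
	if err != nil {
		return fmt.Errorf("select stale fork counts: %w", err)
	}
	defer rows.Close()

	// Buffer the stale IDs first so no new statements run while this
	// result set is still open.
	var staleIDs []int64
	for rows.Next() {
		var id int64
		if err := rows.Scan(&id); err != nil {
			return err
		}
		staleIDs = append(staleIDs, id)
	}
	if err := rows.Err(); err != nil {
		return err
	}

	for _, id := range staleIDs {
		var n int
		if err := db.QueryRow(
			"SELECT COUNT(*) FROM repository WHERE fork_id = ?", id).Scan(&n); err != nil {
			log.Printf("recount forks for repo %d: %v", id, err)
			continue
		}
		if _, err := db.Exec(
			"UPDATE repository SET num_forks = ? WHERE id = ?", n, id); err != nil {
			log.Printf("update repo %d: %v", id, err)
			continue
		}
	}
	return nil
}

func main() {
	db, err := sql.Open("mysql", "user:pass@/gogs") // hypothetical DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := reconcileForkCounts(db); err != nil {
		log.Fatal(err)
	}
}
```

Buffering the IDs before issuing the per-repository recounts matters because some drivers and connection pools handle interleaved queries on one open result set badly; the original code sidesteps this by materializing `results` up front.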
@@ -405,6 +405,19 @@ func (node *Node) SetupMining(ctx context.Context) error { } } + if err := node.StorageMining.Start(ctx); err != nil { + fmt.Printf("error starting storage miner: %s\n", err) + } + + if err := node.StorageProtocol.StorageProvider.Start(ctx); err != nil { + fmt.Printf("error starting storage provider: %s\n", err) + } + + // TODO: Retrieval Market Integration + //if err := node.RetrievalProtocol.RetrievalProvider.Start(); err != nil { + // fmt.Printf("error starting retrieval provider: %s\n", err) + //} + return nil }
1
package node import ( "context" "fmt" "os" "reflect" "runtime" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-sectorbuilder" "github.com/filecoin-project/go-sectorbuilder/fs" "github.com/filecoin-project/specs-actors/actors/abi" fbig "github.com/filecoin-project/specs-actors/actors/abi/big" bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" logging "github.com/ipfs/go-log" "github.com/libp2p/go-libp2p-core/host" "github.com/pkg/errors" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/internal/submodule" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paths" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/paymentchannel" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/plumbing/msg" "github.com/filecoin-project/go-filecoin/internal/app/go-filecoin/porcelain" "github.com/filecoin-project/go-filecoin/internal/pkg/block" "github.com/filecoin-project/go-filecoin/internal/pkg/cborutil" "github.com/filecoin-project/go-filecoin/internal/pkg/chain" "github.com/filecoin-project/go-filecoin/internal/pkg/clock" "github.com/filecoin-project/go-filecoin/internal/pkg/config" "github.com/filecoin-project/go-filecoin/internal/pkg/consensus" "github.com/filecoin-project/go-filecoin/internal/pkg/constants" "github.com/filecoin-project/go-filecoin/internal/pkg/message" "github.com/filecoin-project/go-filecoin/internal/pkg/metrics" "github.com/filecoin-project/go-filecoin/internal/pkg/mining" "github.com/filecoin-project/go-filecoin/internal/pkg/net/pubsub" "github.com/filecoin-project/go-filecoin/internal/pkg/piecemanager" "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/drand" mining_protocol "github.com/filecoin-project/go-filecoin/internal/pkg/protocol/mining" "github.com/filecoin-project/go-filecoin/internal/pkg/repo" "github.com/filecoin-project/go-filecoin/internal/pkg/state" "github.com/filecoin-project/go-filecoin/internal/pkg/version" ) var log = logging.Logger("node") // nolint: deadcode var ( // ErrNoMinerAddress is returned when the node is not configured to have any miner addresses. ErrNoMinerAddress = errors.New("no miner addresses configured") ) // Node represents a full Filecoin node. type Node struct { // OfflineMode, when true, disables libp2p. OfflineMode bool // ChainClock is a chainClock used by the node for chain epoch. ChainClock clock.ChainEpochClock // Repo is the repo this node was created with. // // It contains all persistent artifacts of the filecoin node. Repo repo.Repo PorcelainAPI *porcelain.API DrandAPI *drand.API // // Core services // Blockstore submodule.BlockstoreSubmodule network submodule.NetworkSubmodule Blockservice submodule.BlockServiceSubmodule Discovery submodule.DiscoverySubmodule // // Subsystems // chain submodule.ChainSubmodule syncer submodule.SyncerSubmodule BlockMining submodule.BlockMiningSubmodule StorageMining *submodule.StorageMiningSubmodule // // Supporting services // Wallet submodule.WalletSubmodule Messaging submodule.MessagingSubmodule StorageNetworking submodule.StorageNetworkingSubmodule ProofVerification submodule.ProofVerificationSubmodule // // Protocols // VersionTable *version.ProtocolVersionTable StorageProtocol *submodule.StorageProtocolSubmodule RetrievalProtocol *submodule.RetrievalProtocolSubmodule } // Start boots up the node. 
func (node *Node) Start(ctx context.Context) error { if err := metrics.RegisterPrometheusEndpoint(node.Repo.Config().Observability.Metrics); err != nil { return errors.Wrap(err, "failed to setup metrics") } if err := metrics.RegisterJaeger(node.network.Host.ID().Pretty(), node.Repo.Config().Observability.Tracing); err != nil { return errors.Wrap(err, "failed to setup tracing") } err := node.chain.Start(ctx, node) if err != nil { return err } // Only set these up if there is a miner configured. if _, err := node.MiningAddress(); err == nil { if err := node.setupStorageMining(ctx); err != nil { log.Errorf("setup mining failed: %v", err) return err } } // TODO: defer establishing these API endpoints until the chain is synced when the commands // can handle their absence: https://github.com/filecoin-project/go-filecoin/issues/3137 err = node.setupProtocols() if err != nil { return errors.Wrap(err, "failed to set up protocols:") } // DRAGONS: uncomment when we have retrieval market integration //node.RetrievalProtocol.RetrievalProvider = retrieval.NewMiner() var syncCtx context.Context syncCtx, node.syncer.CancelChainSync = context.WithCancel(context.Background()) // Wire up propagation of new chain heads from the chain store to other components. head, err := node.PorcelainAPI.ChainHead() if err != nil { return errors.Wrap(err, "failed to get chain head") } go node.handleNewChainHeads(syncCtx, head) if !node.OfflineMode { // Subscribe to block pubsub topic to learn about new chain heads. node.syncer.BlockSub, err = node.pubsubscribe(syncCtx, node.syncer.BlockTopic, node.handleBlockSub) if err != nil { log.Error(err) } // Subscribe to the message pubsub topic to learn about messages to mine into blocks. // TODO: defer this subscription until after mining (block production) is started: // https://github.com/filecoin-project/go-filecoin/issues/2145. // This is blocked by https://github.com/filecoin-project/go-filecoin/issues/2959, which // is necessary for message_propagate_test to start mining before testing this behaviour. node.Messaging.MessageSub, err = node.pubsubscribe(syncCtx, node.Messaging.MessageTopic, node.processMessage) if err != nil { return err } if err := node.setupHeartbeatServices(ctx); err != nil { return errors.Wrap(err, "failed to start heartbeat services") } // Start node discovery if err := node.Discovery.Start(node); err != nil { return err } if err := node.syncer.Start(syncCtx, node); err != nil { return err } // Wire up syncing and possible mining go node.doMiningPause(syncCtx) } return nil } // Subscribes a handler function to a pubsub topic. func (node *Node) pubsubscribe(ctx context.Context, topic *pubsub.Topic, handler pubSubHandler) (pubsub.Subscription, error) { sub, err := topic.Subscribe() if err != nil { return nil, errors.Wrapf(err, "failed to subscribe") } go node.handleSubscription(ctx, sub, handler) return sub, nil } func (node *Node) setupHeartbeatServices(ctx context.Context) error { mag := func() address.Address { addr, err := node.MiningAddress() // the only error MiningAddress() returns is ErrNoMinerAddress. // if there is no configured miner address, simply send a zero // address across the wire. 
if err != nil { return address.Undef } return addr } // start the primary heartbeat service if len(node.Repo.Config().Heartbeat.BeatTarget) > 0 { hbs := metrics.NewHeartbeatService(node.Host(), node.chain.ChainReader.GenesisCid(), node.Repo.Config().Heartbeat, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag)) go hbs.Start(ctx) } // check if we want to connect to an alert service. An alerting service is a heartbeat // service that can trigger alerts based on the contents of heatbeats. if alertTarget := os.Getenv("FIL_HEARTBEAT_ALERTS"); len(alertTarget) > 0 { ahbs := metrics.NewHeartbeatService(node.Host(), node.chain.ChainReader.GenesisCid(), &config.HeartbeatConfig{ BeatTarget: alertTarget, BeatPeriod: "10s", ReconnectPeriod: "10s", Nickname: node.Repo.Config().Heartbeat.Nickname, }, node.PorcelainAPI.ChainHead, metrics.WithMinerAddressGetter(mag)) go ahbs.Start(ctx) } return nil } func (node *Node) setIsMining(isMining bool) { node.BlockMining.Mining.Lock() defer node.BlockMining.Mining.Unlock() node.BlockMining.Mining.IsMining = isMining } func (node *Node) handleNewMiningOutput(ctx context.Context, miningOutCh <-chan mining.Output) { defer func() { node.BlockMining.MiningDoneWg.Done() }() for { select { case <-ctx.Done(): return case output, ok := <-miningOutCh: if !ok { return } if output.Err != nil { log.Errorf("stopping mining. error: %s", output.Err.Error()) node.StopMining(context.Background()) } else { node.BlockMining.MiningDoneWg.Add(1) go func() { if node.IsMining() { node.BlockMining.AddNewlyMinedBlock(ctx, output) } node.BlockMining.MiningDoneWg.Done() }() } } } } func (node *Node) handleNewChainHeads(ctx context.Context, firstHead block.TipSet) { newHeadCh := node.chain.ChainReader.HeadEvents().Sub(chain.NewHeadTopic) defer log.Infof("new head handler exited") defer node.chain.ChainReader.HeadEvents().Unsub(newHeadCh) handler := message.NewHeadHandler(node.Messaging.Inbox, node.Messaging.Outbox, node.chain.ChainReader, firstHead) for { log.Debugf("waiting for new head") select { case ts, ok := <-newHeadCh: if !ok { log.Errorf("failed new head channel receive") return } newHead, ok := ts.(block.TipSet) if !ok { log.Errorf("non-tipset published on heaviest tipset channel") continue } height, _ := newHead.Height() log.Debugf("received new head height %s, key %s", height, newHead.Key()) if node.StorageMining != nil { log.Debugf("storage mining handling new head") if err := node.StorageMining.HandleNewHead(ctx, newHead); err != nil { log.Error(err) } } log.Debugf("message pool handling new head") if err := handler.HandleNewHead(ctx, newHead); err != nil { log.Error(err) } case <-ctx.Done(): return } } } func (node *Node) cancelSubscriptions() { if node.syncer.CancelChainSync != nil { node.syncer.CancelChainSync() } if node.syncer.BlockSub != nil { node.syncer.BlockSub.Cancel() node.syncer.BlockSub = nil } if node.Messaging.MessageSub != nil { node.Messaging.MessageSub.Cancel() node.Messaging.MessageSub = nil } } // Stop initiates the shutdown of the node. 
func (node *Node) Stop(ctx context.Context) { node.StopMining(ctx) node.cancelSubscriptions() node.chain.ChainReader.Stop() if node.StorageMining != nil { if err := node.StorageMining.Stop(ctx); err != nil { fmt.Printf("error stopping storage miner: %s\n", err) } node.StorageMining = nil } if err := node.Host().Close(); err != nil { fmt.Printf("error closing host: %s\n", err) } if err := node.Repo.Close(); err != nil { fmt.Printf("error closing repo: %s\n", err) } node.Discovery.Stop() fmt.Println("stopping filecoin :(") } func (node *Node) addNewlyMinedBlock(ctx context.Context, o mining.Output) { log.Debugf("Got a newly mined block from the mining worker: %s", o.Header) if err := node.AddNewBlock(ctx, o); err != nil { log.Warnf("error adding new mined block: %s. err: %s", o.Header.Cid().String(), err.Error()) } } func (node *Node) addMinedBlockSynchronous(ctx context.Context, o mining.Output) error { wait := node.syncer.ChainSyncManager.BlockProposer().WaiterForTarget(block.NewTipSetKey(o.Header.Cid())) err := node.AddNewBlock(ctx, o) if err != nil { return err } err = wait() return err } // MiningAddress returns the address of the mining actor mining on behalf of // the node. func (node *Node) MiningAddress() (address.Address, error) { addr := node.Repo.Config().Mining.MinerAddress if addr.Empty() { return address.Undef, ErrNoMinerAddress } return addr, nil } // SetupMining initializes all the functionality the node needs to start mining. // This method is idempotent. func (node *Node) SetupMining(ctx context.Context) error { // ensure we have a miner actor before we even consider mining minerAddr, err := node.MiningAddress() if err != nil { return errors.Wrap(err, "failed to get mining address") } head := node.PorcelainAPI.ChainHeadKey() _, err = node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head) if err != nil { return errors.Wrap(err, "failed to get miner actor") } // ensure we've got our storage mining submodule configured if node.StorageMining == nil { if err := node.setupStorageMining(ctx); err != nil { return err } } if node.RetrievalProtocol == nil { if err := node.setupRetrievalMining(ctx); err != nil { return err } } // ensure we have a mining worker if node.BlockMining.MiningWorker == nil { if node.BlockMining.MiningWorker, err = node.CreateMiningWorker(ctx); err != nil { return err } } return nil } func registeredProofsFromSectorSize(ss abi.SectorSize) (registeredSealProof abi.RegisteredProof, registeredPoStProof abi.RegisteredProof, err error) { switch ss { case constants.DevSectorSize: return constants.DevRegisteredPoStProof, constants.DevRegisteredSealProof, nil case constants.ThirtyTwoGiBSectorSize: return abi.RegisteredProof_StackedDRG32GiBPoSt, abi.RegisteredProof_StackedDRG32GiBSeal, nil case constants.EightMiBSectorSize: return abi.RegisteredProof_StackedDRG8MiBPoSt, abi.RegisteredProof_StackedDRG8MiBSeal, nil case constants.FiveHundredTwelveMiBSectorSize: return abi.RegisteredProof_StackedDRG512MiBPoSt, abi.RegisteredProof_StackedDRG512MiBSeal, nil default: return 0, 0, errors.Errorf("unsupported sector size %d", ss) } } func (node *Node) setupStorageMining(ctx context.Context) error { if node.StorageMining != nil { return errors.New("storage mining submodule has already been initialized") } minerAddr, err := node.MiningAddress() if err != nil { return err } head := node.Chain().ChainReader.GetHead() status, err := node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head) if err != nil { return err } repoPath, err := node.Repo.Path() if err != nil { return err } 
sectorDir, err := paths.GetSectorPath(node.Repo.Config().SectorBase.RootDir, repoPath) if err != nil { return err } postProofType, sealProofType, err := registeredProofsFromSectorSize(status.SectorSize) if err != nil { return err } sectorBuilder, err := sectorbuilder.New(&sectorbuilder.Config{ PoStProofType: postProofType, SealProofType: sealProofType, Miner: minerAddr, WorkerThreads: 2, Paths: []fs.PathConfig{ { Path: sectorDir, Cache: true, Weight: 1, }, }, }, namespace.Wrap(node.Repo.Datastore(), ds.NewKey("/sectorbuilder"))) if err != nil { return err } cborStore := node.Blockstore.CborStore waiter := msg.NewWaiter(node.chain.ChainReader, node.chain.MessageStore, node.Blockstore.Blockstore, cborStore) // TODO: rework these modules so they can be at least partially constructed during the building phase #3738 stateViewer := state.NewViewer(cborStore) node.StorageMining, err = submodule.NewStorageMiningSubmodule(minerAddr, node.Repo.Datastore(), sectorBuilder, &node.chain, &node.Messaging, waiter, &node.Wallet, stateViewer, node.BlockMining.PoStGenerator) if err != nil { return err } node.StorageProtocol, err = submodule.NewStorageProtocolSubmodule( ctx, minerAddr, address.Undef, // TODO: This is for setting up mining, we need to pass the client address in if this is going to be a storage client also &node.chain, &node.Messaging, waiter, node.StorageMining.PieceManager, node.Wallet.Signer, node.Host(), node.Repo.Datastore(), node.Blockstore.Blockstore, node.network.GraphExchange, repoPath, sectorBuilder.SealProofType(), stateViewer, ) if err != nil { return errors.Wrap(err, "error initializing storage protocol") } return nil } func (node *Node) setupRetrievalMining(ctx context.Context) error { providerAddr, err := node.MiningAddress() if err != nil { return errors.Wrap(err, "failed to get mining address") } waiter := msg.NewWaiter(node.chain.ChainReader, node.chain.MessageStore, node.Blockstore.Blockstore, node.Blockstore.CborStore) mgrStateViewer := paymentchannel.NewManagerStateViewer(node.Chain().ChainReader, node.Blockstore.CborStore) paychMgr := paymentchannel.NewManager( ctx, node.Repo.Datastore(), waiter, node.Messaging.Outbox, mgrStateViewer) rp, err := submodule.NewRetrievalProtocolSubmodule( node.Blockstore.Blockstore, node.Repo.Datastore(), node.chain.State, node.Host(), providerAddr, node.Wallet.Signer, paychMgr, node.PieceManager(), ) if err != nil { return errors.Wrap(err, "failed to build node.RetrievalProtocol") } node.RetrievalProtocol = rp return nil } func (node *Node) doMiningPause(ctx context.Context) { // doMiningPause receives state transition signals from the syncer // dispatcher allowing syncing to make progress. // // When mining, the node passes these signals along to the scheduler // pausing and continuing mining based on syncer state. catchupCh := node.Syncer().ChainSyncManager.TransitionChannel() for { select { case <-ctx.Done(): return case toCatchup, ok := <-catchupCh: if !ok { return } if node.BlockMining.MiningScheduler == nil { // drop syncer transition signals if not mining continue } if toCatchup { node.BlockMining.MiningScheduler.Pause() } else { node.BlockMining.MiningScheduler.Continue() } } } } // StartMining causes the node to start feeding blocks to the mining worker and initializes // the StorageMining for the mining address. 
func (node *Node) StartMining(ctx context.Context) error { if node.IsMining() { return errors.New("Node is already mining") } err := node.SetupMining(ctx) if err != nil { return errors.Wrap(err, "failed to setup mining") } if node.BlockMining.MiningScheduler == nil { node.BlockMining.MiningScheduler = mining.NewScheduler(node.BlockMining.MiningWorker, node.PorcelainAPI.ChainHead, node.ChainClock) } else if node.BlockMining.MiningScheduler.IsStarted() { return fmt.Errorf("miner scheduler already started") } // The block mining scheduler Start() accepts a long-running context, and stopping is performed by cancellation of // that context. // The storage mining module and provider take the immediate context, hopefully don't run any goroutines that // shut down when that context is done (which is ~immediately), and provide explicit Stop() methods instead. // We should pick one consistent way of doing things. var miningCtx context.Context miningCtx, node.BlockMining.CancelMining = context.WithCancel(context.Background()) outCh, doneWg := node.BlockMining.MiningScheduler.Start(miningCtx) node.BlockMining.MiningDoneWg = doneWg node.BlockMining.AddNewlyMinedBlock = node.addNewlyMinedBlock node.BlockMining.MiningDoneWg.Add(1) go node.handleNewMiningOutput(miningCtx, outCh) if err := node.StorageMining.Start(ctx); err != nil { fmt.Printf("error starting storage miner: %s\n", err) } if err := node.StorageProtocol.StorageProvider.Start(ctx); err != nil { fmt.Printf("error starting storage provider: %s\n", err) } // TODO: Retrieval Market Integration //if err := node.RetrievalProtocol.RetrievalProvider.Start(); err != nil { // fmt.Printf("error starting retrieval provider: %s\n", err) //} node.setIsMining(true) return nil } // StopMining stops mining on new blocks. 
func (node *Node) StopMining(ctx context.Context) { node.setIsMining(false) if node.BlockMining.CancelMining != nil { node.BlockMining.CancelMining() } if node.BlockMining.MiningDoneWg != nil { node.BlockMining.MiningDoneWg.Wait() } if node.StorageMining != nil { err := node.StorageMining.Stop(ctx) if err != nil { log.Warn("Error stopping storage miner", err) } } } func (node *Node) handleSubscription(ctx context.Context, sub pubsub.Subscription, handler pubSubHandler) { for { received, err := sub.Next(ctx) if err != nil { if ctx.Err() != context.Canceled { log.Errorf("error reading message from topic %s: %s", sub.Topic(), err) } return } if err := handler(ctx, received); err != nil { handlerName := runtime.FuncForPC(reflect.ValueOf(handler).Pointer()).Name() if err != context.Canceled { log.Errorf("error in handler %s for topic %s: %s", handlerName, sub.Topic(), err) } } } } // setupProtocols creates protocol clients and miners, then sets the node's APIs // for each func (node *Node) setupProtocols() error { blockMiningAPI := mining_protocol.New( node.MiningAddress, node.addMinedBlockSynchronous, node.chain.ChainReader, node.IsMining, node.SetupMining, node.StartMining, node.StopMining, node.GetMiningWorker, node.ChainClock, ) node.BlockMining.BlockMiningAPI = &blockMiningAPI return nil } // GetMiningWorker ensures mining is setup and then returns the worker func (node *Node) GetMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) { if err := node.SetupMining(ctx); err != nil { return nil, err } return node.BlockMining.MiningWorker, nil } // CreateMiningWorker creates a mining.Worker for the node using the configured // getStateTree, getWeight, and getAncestors functions for the node func (node *Node) CreateMiningWorker(ctx context.Context) (*mining.DefaultWorker, error) { minerAddr, err := node.MiningAddress() if err != nil { return nil, errors.Wrap(err, "failed to get mining address") } head := node.PorcelainAPI.ChainHeadKey() minerStatus, err := node.PorcelainAPI.MinerGetStatus(ctx, minerAddr, head) if err != nil { log.Errorf("could not get owner address of miner actor") return nil, err } return mining.NewDefaultWorker(mining.WorkerParameters{ API: node.PorcelainAPI, MinerAddr: minerAddr, MinerOwnerAddr: minerStatus.OwnerAddress, WorkerSigner: node.Wallet.Signer, GetStateTree: node.chain.ChainReader.GetTipSetState, GetWeight: node.getWeight, Election: consensus.NewElectionMachine(node.PorcelainAPI), TicketGen: consensus.NewTicketMachine(node.PorcelainAPI), TipSetMetadata: node.chain.ChainReader, MessageSource: node.Messaging.Inbox.Pool(), MessageStore: node.chain.MessageStore, MessageQualifier: consensus.NewMessagePenaltyChecker(node.Chain().State), Blockstore: node.Blockstore.Blockstore, Clock: node.ChainClock, Poster: node.StorageMining.PoStGenerator, }), nil } // getWeight is the default GetWeight function for the mining worker. func (node *Node) getWeight(ctx context.Context, ts block.TipSet) (fbig.Int, error) { parent, err := ts.Parents() if err != nil { return fbig.Zero(), err } var baseStRoot cid.Cid if parent.Empty() { // use genesis state as parent state of genesis block baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(ts.Key()) } else { baseStRoot, err = node.chain.ChainReader.GetTipSetStateRoot(parent) } if err != nil { return fbig.Zero(), err } return node.syncer.ChainSelector.Weight(ctx, ts, baseStRoot) } // -- Accessors // Host returns the nodes host. 
func (node *Node) Host() host.Host { return node.network.Host } // PieceManager returns the node's PieceManager. func (node *Node) PieceManager() piecemanager.PieceManager { return node.StorageMining.PieceManager } // BlockService returns the nodes blockservice. func (node *Node) BlockService() bserv.BlockService { return node.Blockservice.Blockservice } // CborStore returns the nodes cborStore. func (node *Node) CborStore() *cborutil.IpldStore { return node.Blockstore.CborStore } // IsMining returns a boolean indicating whether the node is mining blocks. func (node *Node) IsMining() bool { node.BlockMining.Mining.Lock() defer node.BlockMining.Mining.Unlock() return node.BlockMining.Mining.IsMining } // Chain returns the chain submodule. func (node *Node) Chain() submodule.ChainSubmodule { return node.chain } // Syncer returns the syncer submodule. func (node *Node) Syncer() submodule.SyncerSubmodule { return node.syncer } // Network returns the network submodule. func (node *Node) Network() submodule.NetworkSubmodule { return node.network }
1
23,540
@shannonwells is there still something missing here?
filecoin-project-venus
go
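One detail the patch above makes visible: failures from `StorageMining.Start` and `StorageProvider.Start` are printed and then ignored, so `SetupMining` can report success with half the machinery down. If aborting on the first failed start were wanted instead, a tiny helper would do; `component` and `startAll` below are illustrative names under that assumption, not go-filecoin API.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// component pairs a human-readable name with a Start-style hook, standing in
// for pieces like node.StorageMining and node.StorageProtocol.StorageProvider.
type component struct {
	name  string
	start func(ctx context.Context) error
}

// startAll starts components in order and stops at the first failure,
// wrapping it with the component's name so the caller knows what broke.
func startAll(ctx context.Context, components []component) error {
	for _, c := range components {
		if err := c.start(ctx); err != nil {
			return fmt.Errorf("starting %s: %w", c.name, err)
		}
	}
	return nil
}

func main() {
	err := startAll(context.Background(), []component{
		{"storage miner", func(context.Context) error { return nil }},
		{"storage provider", func(context.Context) error { return errors.New("boom") }},
	})
	fmt.Println(err) // starting storage provider: boom
}
```

Whether start-up errors should abort or merely warn is exactly the kind of gap the review comment is probing; the sketch only shows the abort-early alternative.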
@@ -149,6 +149,9 @@ MESSAGE # don't accidentally leave the seed encoded. define_reader :seed + # Time limit for stress testing, if any (default: nil). + add_setting :stress_test + # When a block passed to pending fails (as expected), display the failure # without reporting it as a failure (default: false). add_setting :show_failures_in_pending_blocks
1
require 'fileutils' require 'rspec/core/backtrace_cleaner' require 'rspec/core/ruby_project' module RSpec module Core # Stores runtime configuration information. # # Configuration options are loaded from `~/.rspec`, `.rspec`, # `.rspec-local`, command line switches, and the `SPEC_OPTS` environment # variable (listed in lowest to highest precedence; for example, an option # in `~/.rspec` can be overridden by an option in `.rspec-local`). # # @example Standard settings # RSpec.configure do |c| # c.drb = true # c.drb_port = 1234 # c.default_path = 'behavior' # end # # @example Hooks # RSpec.configure do |c| # c.before(:suite) { establish_connection } # c.before(:each) { log_in_as :authorized } # c.around(:each) { |ex| Database.transaction(&ex) } # end # # @see RSpec.configure # @see Hooks class Configuration include RSpec::Core::Hooks class MustBeConfiguredBeforeExampleGroupsError < StandardError; end # @private def self.define_reader(name) define_method(name) do variable = instance_variable_defined?("@#{name}") ? instance_variable_get("@#{name}") : nil value_for(name, variable) end end # @private def self.deprecate_alias_key RSpec.warn_deprecation <<-MESSAGE The :alias option to add_setting is deprecated. Use :alias_with on the original setting instead. Called from #{caller(0)[5]} MESSAGE end # @private def self.define_aliases(name, alias_name) alias_method alias_name, name alias_method "#{alias_name}=", "#{name}=" define_predicate_for alias_name end # @private def self.define_predicate_for(*names) names.each {|name| alias_method "#{name}?", name} end # @private # # Invoked by the `add_setting` instance method. Use that method on a # `Configuration` instance rather than this class method. def self.add_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default) if opts[:alias] deprecate_alias_key define_aliases(opts[:alias], name) else attr_writer name define_reader name define_predicate_for name end [opts[:alias_with]].flatten.compact.each do |alias_name| define_aliases(name, alias_name) end end # @macro [attach] add_setting # @attribute $1 # Path to use if no path is provided to the `rspec` command (default: # `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to # run all the examples in the `spec` directory. add_setting :default_path # Run examples over DRb (default: `false`). RSpec doesn't supply the DRb # server, but you can use tools like spork. add_setting :drb # The drb_port (default: nil). add_setting :drb_port # Default: `$stderr`. add_setting :error_stream # Clean up and exit after the first failure (default: `false`). add_setting :fail_fast # The exit code to return if there are any failures (default: 1). add_setting :failure_exit_code # Determines the order in which examples are run (default: OS standard # load order for files, declaration order for groups and examples). define_reader :order # Indicates files configured to be required define_reader :requires # Returns dirs that have been prepended to the load path by #lib= define_reader :libs # Default: `$stdout`. # Also known as `output` and `out` add_setting :output_stream, :alias_with => [:output, :out] # Load files matching this pattern (default: `'**/*_spec.rb'`) add_setting :pattern, :alias_with => :filename_pattern # Report the times for the slowest examples (default: `false`). # Use this to specify the number of examples to include in the profile. 
add_setting :profile_examples # Run all examples if none match the configured filters (default: `false`). add_setting :run_all_when_everything_filtered # Allow user to configure their own success/pending/failure colors # @param [Symbol] should be one of the following: [:black, :white, :red, :green, :yellow, :blue, :magenta, :cyan] add_setting :success_color add_setting :pending_color add_setting :failure_color add_setting :default_color add_setting :fixed_color add_setting :detail_color # Seed for random ordering (default: generated randomly each run). # # When you run specs with `--order random`, RSpec generates a random seed # for the randomization and prints it to the `output_stream` (assuming # you're using RSpec's built-in formatters). If you discover an ordering # dependency (i.e. examples fail intermittently depending on order), set # this (on Configuration or on the command line with `--seed`) to run # using the same seed while you debug the issue. # # We recommend, actually, that you use the command line approach so you # don't accidentally leave the seed encoded. define_reader :seed # When a block passed to pending fails (as expected), display the failure # without reporting it as a failure (default: false). add_setting :show_failures_in_pending_blocks # Convert symbols to hashes with the symbol as a key with a value of # `true` (default: false). # # This allows you to tag a group or example like this: # # describe "something slow", :slow do # # ... # end # # ... instead of having to type: # # describe "something slow", :slow => true do # # ... # end add_setting :treat_symbols_as_metadata_keys_with_true_values # @private add_setting :tty # @private add_setting :include_or_extend_modules # @private add_setting :files_to_run # @private add_setting :expecting_with_rspec # @private attr_accessor :filter_manager attr_reader :backtrace_cleaner def initialize @expectation_frameworks = [] @include_or_extend_modules = [] @mock_framework = nil @files_to_run = [] @formatters = [] @color = false @pattern = '**/*_spec.rb' @failure_exit_code = 1 @backtrace_cleaner = BacktraceCleaner.new @default_path = 'spec' @filter_manager = FilterManager.new @preferred_options = {} @seed = srand % 0xFFFF @failure_color = :red @success_color = :green @pending_color = :yellow @default_color = :white @fixed_color = :blue @detail_color = :cyan @profile_examples = false @requires = [] @libs = [] end # @private # # Used to set higher priority option values from the command line. def force(hash) if hash.has_key?(:seed) hash[:order], hash[:seed] = order_and_seed_from_seed(hash[:seed]) elsif hash.has_key?(:order) set_order_and_seed(hash) end @preferred_options.merge!(hash) self.warnings = value_for :warnings, nil end # @private def reset @reporter = nil @formatters.clear end # @overload add_setting(name) # @overload add_setting(name, opts) # @option opts [Symbol] :default # # set a default value for the generated getter and predicate methods: # # add_setting(:foo, :default => "default value") # # @option opts [Symbol] :alias_with # # Use `:alias_with` to alias the setter, getter, and predicate to another # name, or names: # # add_setting(:foo, :alias_with => :bar) # add_setting(:foo, :alias_with => [:bar, :baz]) # # Adds a custom setting to the RSpec.configuration object. # # RSpec.configuration.add_setting :foo # # Used internally and by extension frameworks like rspec-rails, so they # can add config settings that are domain specific. 
For example: # # RSpec.configure do |c| # c.add_setting :use_transactional_fixtures, # :default => true, # :alias_with => :use_transactional_examples # end # # `add_setting` creates three methods on the configuration object, a # setter, a getter, and a predicate: # # RSpec.configuration.foo=(value) # RSpec.configuration.foo # RSpec.configuration.foo? # returns true if foo returns anything but nil or false def add_setting(name, opts={}) default = opts.delete(:default) (class << self; self; end).class_eval do add_setting(name, opts) end send("#{name}=", default) if default end # Returns the configured mock framework adapter module def mock_framework mock_with :rspec unless @mock_framework @mock_framework end # Delegates to mock_framework=(framework) def mock_framework=(framework) mock_with framework end # The patterns to discard from backtraces. Deprecated, use # Configuration#backtrace_exclusion_patterns instead # # Defaults to RSpec::Core::BacktraceCleaner::DEFAULT_EXCLUSION_PATTERNS # # One can replace the list by using the setter or modify it through the # getter # # To override this behaviour and display a full backtrace, use # `--backtrace`on the command line, in a `.rspec` file, or in the # `rspec_options` attribute of RSpec's rake task. def backtrace_clean_patterns RSpec.deprecate("RSpec::Core::Configuration#backtrace_clean_patterns", "RSpec::Core::Configuration#backtrace_exclusion_patterns") @backtrace_cleaner.exclusion_patterns end def backtrace_clean_patterns=(patterns) RSpec.deprecate("RSpec::Core::Configuration#backtrace_clean_patterns", "RSpec::Core::Configuration#backtrace_exclusion_patterns") @backtrace_cleaner.exclusion_patterns = patterns end # The patterns to always include to backtraces. # # Defaults to [Regexp.new Dir.getwd] if the current working directory # matches any of the exclusion patterns. Otherwise it defaults to empty. # # One can replace the list by using the setter or modify it through the # getter def backtrace_inclusion_patterns @backtrace_cleaner.inclusion_patterns end def backtrace_inclusion_patterns=(patterns) @backtrace_cleaner.inclusion_patterns = patterns end # The patterns to discard from backtraces. # # Defaults to RSpec::Core::BacktraceCleaner::DEFAULT_EXCLUSION_PATTERNS # # One can replace the list by using the setter or modify it through the # getter # # To override this behaviour and display a full backtrace, use # `--backtrace`on the command line, in a `.rspec` file, or in the # `rspec_options` attribute of RSpec's rake task. def backtrace_exclusion_patterns @backtrace_cleaner.exclusion_patterns end def backtrace_exclusion_patterns=(patterns) @backtrace_cleaner.exclusion_patterns = patterns end # Sets the mock framework adapter module. # # `framework` can be a Symbol or a Module. # # Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the # named framework. # # Given `:nothing`, configures no framework. Use this if you don't use # any mocking framework to save a little bit of overhead. # # Given a Module, includes that module in every example group. The module # should adhere to RSpec's mock framework adapter API: # # setup_mocks_for_rspec # - called before each example # # verify_mocks_for_rspec # - called after each example. Framework should raise an exception # when expectations fail # # teardown_mocks_for_rspec # - called after verify_mocks_for_rspec (even if there are errors) # # If the module responds to `configuration` and `mock_with` receives a block, # it will yield the configuration object to the block e.g. 
# # config.mock_with OtherMockFrameworkAdapter do |mod_config| # mod_config.custom_setting = true # end def mock_with(framework) framework_module = case framework when Module framework when String, Symbol require case framework.to_s when /rspec/i 'rspec/core/mocking/with_rspec' when /mocha/i 'rspec/core/mocking/with_mocha' when /rr/i 'rspec/core/mocking/with_rr' when /flexmock/i 'rspec/core/mocking/with_flexmock' else 'rspec/core/mocking/with_absolutely_nothing' end RSpec::Core::MockFrameworkAdapter end new_name, old_name = [framework_module, @mock_framework].map do |mod| mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed end unless new_name == old_name assert_no_example_groups_defined(:mock_framework) end if block_given? raise "#{framework_module} must respond to `configuration` so that mock_with can yield it." unless framework_module.respond_to?(:configuration) yield framework_module.configuration end @mock_framework = framework_module end # Returns the configured expectation framework adapter module(s) def expectation_frameworks expect_with :rspec if @expectation_frameworks.empty? @expectation_frameworks end # Delegates to expect_with(framework) def expectation_framework=(framework) expect_with(framework) end # Sets the expectation framework module(s) to be included in each example # group. # # `frameworks` can be `:rspec`, `:stdlib`, a custom module, or any # combination thereof: # # config.expect_with :rspec # config.expect_with :stdlib # config.expect_with :rspec, :stdlib # config.expect_with OtherExpectationFramework # # RSpec will translate `:rspec` and `:stdlib` into the appropriate # modules. # # ## Configuration # # If the module responds to `configuration`, `expect_with` will # yield the `configuration` object if given a block: # # config.expect_with OtherExpectationFramework do |custom_config| # custom_config.custom_setting = true # end def expect_with(*frameworks) modules = frameworks.map do |framework| case framework when Module framework when :rspec require 'rspec/expectations' self.expecting_with_rspec = true ::RSpec::Matchers when :stdlib require 'test/unit/assertions' ::Test::Unit::Assertions else raise ArgumentError, "#{framework.inspect} is not supported" end end if (modules - @expectation_frameworks).any? assert_no_example_groups_defined(:expect_with) end if block_given? raise "expect_with only accepts a block with a single argument. Call expect_with #{modules.length} times, once with each argument, instead." if modules.length > 1 raise "#{modules.first} must respond to `configuration` so that expect_with can yield it." unless modules.first.respond_to?(:configuration) yield modules.first.configuration end @expectation_frameworks.push(*modules) end def full_backtrace? @backtrace_cleaner.full_backtrace? end def full_backtrace=(true_or_false) @backtrace_cleaner.full_backtrace = true_or_false end def color(output=output_stream) # rspec's built-in formatters all call this with the output argument, # but defaulting to output_stream for backward compatibility with # formatters in extension libs return false unless output_to_tty?(output) value_for(:color, @color) end def color=(bool) if bool if RSpec.windows_os? 
and not ENV['ANSICON'] warn "You must use ANSICON 1.31 or later (http://adoxa.3eeweb.com/ansicon/) to use colour on Windows" @color = false else @color = true end end end # TODO - deprecate color_enabled - probably not until the last 2.x # release before 3.0 alias_method :color_enabled, :color alias_method :color_enabled=, :color= define_predicate_for :color_enabled, :color def libs=(libs) libs.map do |lib| @libs.unshift lib $LOAD_PATH.unshift lib end end def requires=(paths) RSpec.deprecate("RSpec::Core::Configuration#requires=(paths)", "paths.each {|path| require path}") paths.map {|path| require path} @requires += paths end def debug=(bool) return unless bool begin require 'ruby-debug' Debugger.start rescue LoadError => e raise <<-EOM #{'*'*50} #{e.message} If you have it installed as a ruby gem, then you need to either require 'rubygems' or configure the RUBYOPT environment variable with the value 'rubygems'. #{e.backtrace.join("\n")} #{'*'*50} EOM end end def debug? !!defined?(Debugger) end # Run examples defined on `line_numbers` in all files to run. def line_numbers=(line_numbers) filter_run :line_numbers => line_numbers.map{|l| l.to_i} end def line_numbers filter.fetch(:line_numbers,[]) end def full_description=(description) filter_run :full_description => Regexp.union(*Array(description).map {|d| Regexp.new(d) }) end def full_description filter.fetch :full_description, nil end # @overload add_formatter(formatter) # # Adds a formatter to the formatters collection. `formatter` can be a # string representing any of the built-in formatters (see # `built_in_formatter`), or a custom formatter class. # # ### Note # # For internal purposes, `add_formatter` also accepts the name of a class # and path to a file that contains that class definition, but you should # consider that a private api that may change at any time without notice. def add_formatter(formatter_to_use, path=nil) formatter_class = built_in_formatter(formatter_to_use) || custom_formatter(formatter_to_use) || (raise ArgumentError, "Formatter '#{formatter_to_use}' unknown - maybe you meant 'documentation' or 'progress'?.") formatters << formatter_class.new(path ? file_at(path) : output) end alias_method :formatter=, :add_formatter def formatters @formatters ||= [] end def reporter @reporter ||= begin add_formatter('progress') if formatters.empty? Reporter.new(*formatters) end end # @api private # # Defaults `profile_examples` to 10 examples when `@profile_examples` is `true`. # def profile_examples profile = value_for(:profile_examples, @profile_examples) if profile && !profile.is_a?(Integer) 10 else profile end end # @private def files_or_directories_to_run=(*files) files = files.flatten files << default_path if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty? self.files_to_run = get_files_to_run(files) end # Creates a method that delegates to `example` including the submitted # `args`. Used internally to add variants of `example` like `pending`: # # @example # alias_example_to :pending, :pending => true # # # This lets you do this: # # describe Thing do # pending "does something" do # thing = Thing.new # end # end # # # ... 
which is the equivalent of # # describe Thing do # it "does something", :pending => true do # thing = Thing.new # end # end def alias_example_to(new_name, *args) extra_options = build_metadata_hash_from(args) RSpec::Core::ExampleGroup.alias_example_to(new_name, extra_options) end # Define an alias for it_should_behave_like that allows different # language (like "it_has_behavior" or "it_behaves_like") to be # employed when including shared examples. # # Example: # # alias_it_behaves_like_to(:it_has_behavior, 'has behavior:') # # allows the user to include a shared example group like: # # describe Entity do # it_has_behavior 'sortability' do # let(:sortable) { Entity.new } # end # end # # which is reported in the output as: # # Entity # has behavior: sortability # # sortability examples here def alias_it_behaves_like_to(new_name, report_label = '') RSpec::Core::ExampleGroup.alias_it_behaves_like_to(new_name, report_label) end alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to # Adds key/value pairs to the `inclusion_filter`. If the # `treat_symbols_as_metadata_keys_with_true_values` config option is set # to true and `args` includes any symbols that are not part of a hash, # each symbol is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... # end # # # any of the following will include that group # config.filter_run_including :foo => 'bar' # config.filter_run_including :foo => /^ba/ # config.filter_run_including :foo => lambda {|v| v == 'bar'} # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_including :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # with treat_symbols_as_metadata_keys_with_true_values = true # filter_run_including :foo # same as filter_run_including :foo => true def filter_run_including(*args) filter_manager.include_with_low_priority build_metadata_hash_from(args) end alias_method :filter_run, :filter_run_including # Clears and reassigns the `inclusion_filter`. Set to `nil` if you don't # want any inclusion filter at all. # # ### Warning # # This overrides any inclusion filters/tags set on the command line or in # configuration files. def inclusion_filter=(filter) filter_manager.include! build_metadata_hash_from([filter]) end alias_method :filter=, :inclusion_filter= # Returns the `inclusion_filter`. If none has been set, returns an empty # hash. def inclusion_filter filter_manager.inclusions end alias_method :filter, :inclusion_filter # Adds key/value pairs to the `exclusion_filter`. If the # `treat_symbols_as_metadata_keys_with_true_values` config option is set # to true and `args` excludes any symbols that are not part of a hash, # each symbol is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... 
# end # # # any of the following will exclude that group # config.filter_run_excluding :foo => 'bar' # config.filter_run_excluding :foo => /^ba/ # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # with treat_symbols_as_metadata_keys_with_true_values = true # filter_run_excluding :foo # same as filter_run_excluding :foo => true def filter_run_excluding(*args) filter_manager.exclude_with_low_priority build_metadata_hash_from(args) end # Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't # want any exclusion filter at all. # # ### Warning # # This overrides any exclusion filters/tags set on the command line or in # configuration files. def exclusion_filter=(filter) filter_manager.exclude! build_metadata_hash_from([filter]) end # Returns the `exclusion_filter`. If none has been set, returns an empty # hash. def exclusion_filter filter_manager.exclusions end # Tells RSpec to include `mod` in example groups. Methods defined in # `mod` are exposed to examples (not example groups). Use `filters` to # constrain the groups in which to include the module. # # @example # # module AuthenticationHelpers # def login_as(user) # # ... # end # end # # module UserHelpers # def users(username) # # ... # end # end # # RSpec.configure do |config| # config.include(UserHelpers) # included in all modules # config.include(AuthenticationHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # it "can be viewed by owning user" do # login_as users(:jdoe) # get "/profiles/jdoe" # assert_select ".username", :text => 'jdoe' # end # end # # @see #extend def include(mod, *filters) include_or_extend_modules << [:include, mod, build_metadata_hash_from(filters)] end # Tells RSpec to extend example groups with `mod`. Methods defined in # `mod` are exposed to example groups (not examples). Use `filters` to # constrain the groups to extend. # # Similar to `include`, but behavior is added to example groups, which # are classes, rather than the examples, which are instances of those # classes. # # @example # # module UiHelpers # def run_in_browser # # ... # end # end # # RSpec.configure do |config| # config.extend(UiHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # run_in_browser # # it "does stuff in the client" do # # ... # end # end # # @see #include def extend(mod, *filters) include_or_extend_modules << [:extend, mod, build_metadata_hash_from(filters)] end # @private # # Used internally to extend a group with modules using `include` and/or # `extend`. def configure_group(group) include_or_extend_modules.each do |include_or_extend, mod, filters| next unless filters.empty? || group.any_apply?(filters) send("safe_#{include_or_extend}", mod, group) end end # @private def safe_include(mod, host) host.send(:include,mod) unless host < mod end # @private def setup_load_path_and_require(paths) directories = ['lib', default_path].select { |p| File.directory? 
p } RSpec::Core::RubyProject.add_to_load_path(*directories) paths.each {|path| require path} @requires += paths end # @private if RUBY_VERSION.to_f >= 1.9 def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end) < mod end else def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end).included_modules.include?(mod) end end # @private def configure_mock_framework RSpec::Core::ExampleGroup.send(:include, mock_framework) end # @private def configure_expectation_framework expectation_frameworks.each do |framework| RSpec::Core::ExampleGroup.send(:include, framework) end end # @private def load_spec_files files_to_run.uniq.each {|f| load File.expand_path(f) } raise_if_rspec_1_is_loaded end # @private DEFAULT_FORMATTER = lambda { |string| string } # Formats the docstring output using the block provided. # # @example # # This will strip the descriptions of both examples and example groups. # RSpec.configure do |config| # config.format_docstrings { |s| s.strip } # end def format_docstrings(&block) @format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER end # @private def format_docstrings_block @format_docstrings_block ||= DEFAULT_FORMATTER end # @api # # Sets the seed value and sets `order='rand'` def seed=(seed) order_and_seed_from_seed(seed) end # @api # # Sets the order and, if order is `'rand:<seed>'`, also sets the seed. def order=(type) order_and_seed_from_order(type) end def randomize? order.to_s.match(/rand/) end # @private DEFAULT_ORDERING = lambda { |list| list } # @private RANDOM_ORDERING = lambda do |list| Kernel.srand RSpec.configuration.seed ordering = list.sort_by { Kernel.rand(list.size) } Kernel.srand # reset random generation ordering end # Sets a strategy by which to order examples. # # @example # RSpec.configure do |config| # config.order_examples do |examples| # examples.reverse # end # end # # @see #order_groups # @see #order_groups_and_examples # @see #order= # @see #seed= def order_examples(&block) @example_ordering_block = block @order = "custom" unless built_in_orderer?(block) end # @private def example_ordering_block @example_ordering_block ||= DEFAULT_ORDERING end # Sets a strategy by which to order groups. # # @example # RSpec.configure do |config| # config.order_groups do |groups| # groups.reverse # end # end # # @see #order_examples # @see #order_groups_and_examples # @see #order= # @see #seed= def order_groups(&block) @group_ordering_block = block @order = "custom" unless built_in_orderer?(block) end # @private def group_ordering_block @group_ordering_block ||= DEFAULT_ORDERING end # Sets a strategy by which to order groups and examples. # # @example # RSpec.configure do |config| # config.order_groups_and_examples do |groups_or_examples| # groups_or_examples.reverse # end # end # # @see #order_groups # @see #order_examples # @see #order= # @see #seed= def order_groups_and_examples(&block) order_groups(&block) order_examples(&block) end # Set Ruby warnings on or off def warnings= value $VERBOSE = !!value end def warnings $VERBOSE end private def get_files_to_run(paths) paths.map do |path| path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR File.directory?(path) ? gather_directories(path) : extract_location(path) end.flatten.sort end def gather_directories(path) stripped = "{#{pattern.gsub(/\s*,\s*/, ',')}}" files = pattern =~ /^#{Regexp.escape path}/ ? 
Dir[stripped] : Dir["#{path}/#{stripped}"] files.sort end def extract_location(path) if path =~ /^(.*?)((?:\:\d+)+)$/ path, lines = $1, $2[1..-1].split(":").map{|n| n.to_i} filter_manager.add_location path, lines end path end def command $0.split(File::SEPARATOR).last end def value_for(key, default=nil) @preferred_options.has_key?(key) ? @preferred_options[key] : default end def assert_no_example_groups_defined(config_option) if RSpec.world.example_groups.any? raise MustBeConfiguredBeforeExampleGroupsError.new( "RSpec's #{config_option} configuration option must be configured before " + "any example groups are defined, but you have already defined a group." ) end end def raise_if_rspec_1_is_loaded if defined?(Spec) && defined?(Spec::VERSION::MAJOR) && Spec::VERSION::MAJOR == 1 raise <<-MESSAGE #{'*'*80} You are running rspec-2, but it seems as though rspec-1 has been loaded as well. This is likely due to a statement like this somewhere in the specs: require 'spec' Please locate that statement, remove it, and try again. #{'*'*80} MESSAGE end end def output_to_tty?(output=output_stream) tty? || (output.respond_to?(:tty?) && output.tty?) end def built_in_formatter(key) case key.to_s when 'd', 'doc', 'documentation', 's', 'n', 'spec', 'nested' require 'rspec/core/formatters/documentation_formatter' RSpec::Core::Formatters::DocumentationFormatter when 'h', 'html' require 'rspec/core/formatters/html_formatter' RSpec::Core::Formatters::HtmlFormatter when 't', 'textmate' require 'rspec/core/formatters/text_mate_formatter' RSpec::Core::Formatters::TextMateFormatter when 'p', 'progress' require 'rspec/core/formatters/progress_formatter' RSpec::Core::Formatters::ProgressFormatter when 'j', 'json' require 'rspec/core/formatters/json_formatter' RSpec::Core::Formatters::JsonFormatter end end def custom_formatter(formatter_ref) if Class === formatter_ref formatter_ref elsif string_const?(formatter_ref) begin eval(formatter_ref) rescue NameError require path_for(formatter_ref) eval(formatter_ref) end end end def string_const?(str) str.is_a?(String) && /\A[A-Z][a-zA-Z0-9_:]*\z/ =~ str end def path_for(const_ref) underscore_with_fix_for_non_standard_rspec_naming(const_ref) end def underscore_with_fix_for_non_standard_rspec_naming(string) underscore(string).sub(%r{(^|/)r_spec($|/)}, '\\1rspec\\2') end # activesupport/lib/active_support/inflector/methods.rb, line 48 def underscore(camel_cased_word) word = camel_cased_word.to_s.dup word.gsub!(/::/, '/') word.gsub!(/([A-Z]+)([A-Z][a-z])/,'\1_\2') word.gsub!(/([a-z\d])([A-Z])/,'\1_\2') word.tr!("-", "_") word.downcase! word end def file_at(path) FileUtils.mkdir_p(File.dirname(path)) File.new(path, 'w') end def order_and_seed_from_seed(value) order_groups_and_examples(&RANDOM_ORDERING) @order, @seed = 'rand', value.to_i [@order, @seed] end def set_order_and_seed(hash) hash[:order], seed = order_and_seed_from_order(hash[:order]) hash[:seed] = seed if seed end def order_and_seed_from_order(type) order, seed = type.to_s.split(':') @order = order @seed = seed = seed.to_i if seed if randomize? order_groups_and_examples(&RANDOM_ORDERING) elsif order == 'default' @order, @seed = nil, nil order_groups_and_examples(&DEFAULT_ORDERING) end return order, seed end def built_in_orderer?(block) [DEFAULT_ORDERING, RANDOM_ORDERING].include?(block) end end end end
1
9,178
This name could be more indicative of what it is, e.g. `stresstest_time_limit`.
rspec-rspec-core
rb
@@ -279,7 +279,6 @@ bool PDPSimple::createPDPEndpoints() delete mp_listener; mp_listener = nullptr; reader_payload_pool_->release_history(reader_pool_cfg, true); - TopicPayloadPoolRegistry::release(reader_payload_pool_); return false; }
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file PDPSimple.cpp * */ #include <fastdds/rtps/builtin/discovery/participant/PDPSimple.h> #include <fastdds/rtps/builtin/discovery/participant/PDPListener.h> #include <fastdds/rtps/builtin/discovery/endpoint/EDPSimple.h> #include <fastdds/rtps/builtin/discovery/endpoint/EDPStatic.h> #include <fastdds/rtps/resources/TimedEvent.h> #include <fastdds/rtps/builtin/BuiltinProtocols.h> #include <fastdds/rtps/builtin/liveliness/WLP.h> #include <fastdds/rtps/builtin/data/ParticipantProxyData.h> #include <fastdds/rtps/builtin/data/ReaderProxyData.h> #include <fastdds/rtps/builtin/data/WriterProxyData.h> #include <fastdds/rtps/participant/RTPSParticipantListener.h> #include <fastdds/rtps/writer/StatelessWriter.h> #include <fastdds/rtps/resources/AsyncWriterThread.h> #include <fastdds/rtps/reader/StatelessReader.h> #include <fastdds/rtps/reader/StatefulReader.h> #include <fastdds/rtps/history/WriterHistory.h> #include <fastdds/rtps/history/ReaderHistory.h> #include <fastdds/dds/builtin/typelookup/TypeLookupManager.hpp> #include <fastrtps/utils/TimeConversion.h> #include <fastrtps/utils/IPLocator.h> #include <rtps/history/TopicPayloadPoolRegistry.hpp> #include <rtps/participant/RTPSParticipantImpl.h> #include <fastdds/dds/log/Log.hpp> #include <mutex> using namespace eprosima::fastrtps; namespace eprosima { namespace fastrtps { namespace rtps { PDPSimple::PDPSimple ( BuiltinProtocols* built, const RTPSParticipantAllocationAttributes& allocation) : PDP(built, allocation) { } PDPSimple::~PDPSimple() { } void PDPSimple::initializeParticipantProxyData( ParticipantProxyData* participant_data) { PDP::initializeParticipantProxyData(participant_data); if (getRTPSParticipant()->getAttributes().builtin.discovery_config. use_SIMPLE_EndpointDiscoveryProtocol) { if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP. use_PublicationWriterANDSubscriptionReader) { participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_ANNOUNCER; participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_DETECTOR; } if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP. use_PublicationReaderANDSubscriptionWriter) { participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_DETECTOR; participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_ANNOUNCER; } #if HAVE_SECURITY if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP. enable_builtin_secure_publications_writer_and_subscriptions_reader) { participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_ANNOUNCER; participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_DETECTOR; } if (getRTPSParticipant()->getAttributes().builtin.discovery_config.m_simpleEDP. 
enable_builtin_secure_subscriptions_writer_and_publications_reader) { participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_SUBSCRIPTION_SECURE_ANNOUNCER; participant_data->m_availableBuiltinEndpoints |= DISC_BUILTIN_ENDPOINT_PUBLICATION_SECURE_DETECTOR; } #endif // if HAVE_SECURITY } else if (!getRTPSParticipant()->getAttributes().builtin.discovery_config. use_STATIC_EndpointDiscoveryProtocol) { logError(RTPS_PDP, "Neither EDP simple nor EDP static enabled. Endpoints will not be discovered."); } } bool PDPSimple::init( RTPSParticipantImpl* part) { // The DATA(p) must be processed after EDP endpoint creation if (!PDP::initPDP(part)) { return false; } //INIT EDP if (m_discovery.discovery_config.use_STATIC_EndpointDiscoveryProtocol) { mp_EDP = new EDPStatic(this, mp_RTPSParticipant); if (!mp_EDP->initEDP(m_discovery)) { logError(RTPS_PDP, "Endpoint discovery configuration failed"); return false; } } else if (m_discovery.discovery_config.use_SIMPLE_EndpointDiscoveryProtocol) { mp_EDP = new EDPSimple(this, mp_RTPSParticipant); if (!mp_EDP->initEDP(m_discovery)) { logError(RTPS_PDP, "Endpoint discovery configuration failed"); return false; } } else { logWarning(RTPS_PDP, "No EndpointDiscoveryProtocol defined"); return false; } return true; } ParticipantProxyData* PDPSimple::createParticipantProxyData( const ParticipantProxyData& participant_data, const GUID_t&) { std::unique_lock<std::recursive_mutex> lock(*getMutex()); // decide if we dismiss the participant using the ParticipantFilteringFlags const ParticipantFilteringFlags_t& flags = m_discovery.discovery_config.ignoreParticipantFlags; if (flags != ParticipantFilteringFlags_t::NO_FILTER) { const GUID_t& remote = participant_data.m_guid; const GUID_t& local = getLocalParticipantProxyData()->m_guid; if (!local.is_on_same_host_as(remote)) { if (flags & ParticipantFilteringFlags::FILTER_DIFFERENT_HOST) { return nullptr; } } else { bool filter_same = (flags& ParticipantFilteringFlags::FILTER_SAME_PROCESS) != 0; bool filter_different = (flags& ParticipantFilteringFlags::FILTER_DIFFERENT_PROCESS) != 0; if (filter_same && filter_different) { return nullptr; } bool is_same = local.is_on_same_process_as(remote); if ((filter_same && is_same) || (filter_different && !is_same)) { return nullptr; } } } ParticipantProxyData* pdata = add_participant_proxy_data(participant_data.m_guid, true); if (pdata != nullptr) { pdata->copy(participant_data); pdata->isAlive = true; pdata->lease_duration_event->update_interval(pdata->m_leaseDuration); pdata->lease_duration_event->restart_timer(); } return pdata; } // EDPStatic requires matching on ParticipantProxyData property updates bool PDPSimple::updateInfoMatchesEDP() { return dynamic_cast<EDPStatic*>(mp_EDP) != nullptr; } void PDPSimple::announceParticipantState( bool new_change, bool dispose, WriteParams& wp) { PDP::announceParticipantState(new_change, dispose, wp); if (!(dispose || new_change)) { StatelessWriter* pW = dynamic_cast<StatelessWriter*>(mp_PDPWriter); if (pW != nullptr) { pW->unsent_changes_reset(); } else { logError(RTPS_PDP, "Using PDPSimple protocol with a reliable writer"); } } } bool PDPSimple::createPDPEndpoints() { logInfo(RTPS_PDP, "Beginning"); const RTPSParticipantAllocationAttributes& allocation = mp_RTPSParticipant->getRTPSParticipantAttributes().allocation; //SPDP BUILTIN RTPSParticipant READER HistoryAttributes hatt; hatt.payloadMaxSize = mp_builtin->m_att.readerPayloadSize; hatt.memoryPolicy = mp_builtin->m_att.readerHistoryMemoryPolicy; hatt.initialReservedCaches = 25; 
if (allocation.participants.initial > 0) { hatt.initialReservedCaches = (int32_t)allocation.participants.initial; } if (allocation.participants.maximum < std::numeric_limits<size_t>::max()) { hatt.maximumReservedCaches = (int32_t)allocation.participants.maximum; } PoolConfig reader_pool_cfg = PoolConfig::from_history_attributes(hatt); reader_payload_pool_ = TopicPayloadPoolRegistry::get("DCPSParticipant", reader_pool_cfg); reader_payload_pool_->reserve_history(reader_pool_cfg, true); mp_PDPReaderHistory = new ReaderHistory(hatt); ReaderAttributes ratt; ratt.endpoint.multicastLocatorList = mp_builtin->m_metatrafficMulticastLocatorList; ratt.endpoint.unicastLocatorList = mp_builtin->m_metatrafficUnicastLocatorList; ratt.endpoint.topicKind = WITH_KEY; ratt.endpoint.durabilityKind = TRANSIENT_LOCAL; ratt.endpoint.reliabilityKind = BEST_EFFORT; ratt.matched_writers_allocation = allocation.participants; mp_listener = new PDPListener(this); if (mp_RTPSParticipant->createReader(&mp_PDPReader, ratt, reader_payload_pool_, mp_PDPReaderHistory, mp_listener, c_EntityId_SPDPReader, true, false)) { #if HAVE_SECURITY mp_RTPSParticipant->set_endpoint_rtps_protection_supports(mp_PDPReader, false); #endif // if HAVE_SECURITY } else { logError(RTPS_PDP, "SimplePDP Reader creation failed"); delete mp_PDPReaderHistory; mp_PDPReaderHistory = nullptr; delete mp_listener; mp_listener = nullptr; reader_payload_pool_->release_history(reader_pool_cfg, true); TopicPayloadPoolRegistry::release(reader_payload_pool_); return false; } //SPDP BUILTIN RTPSParticipant WRITER hatt.payloadMaxSize = mp_builtin->m_att.writerPayloadSize; hatt.initialReservedCaches = 1; hatt.maximumReservedCaches = 1; hatt.memoryPolicy = mp_builtin->m_att.writerHistoryMemoryPolicy; PoolConfig writer_pool_cfg = PoolConfig::from_history_attributes(hatt); writer_payload_pool_ = TopicPayloadPoolRegistry::get("DCPSParticipant", writer_pool_cfg); writer_payload_pool_->reserve_history(writer_pool_cfg, false); mp_PDPWriterHistory = new WriterHistory(hatt); WriterAttributes watt; watt.endpoint.endpointKind = WRITER; watt.endpoint.durabilityKind = TRANSIENT_LOCAL; watt.endpoint.reliabilityKind = BEST_EFFORT; watt.endpoint.topicKind = WITH_KEY; watt.endpoint.remoteLocatorList = m_discovery.initialPeersList; watt.matched_readers_allocation = allocation.participants; if (mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.bytesPerPeriod != UINT32_MAX && mp_RTPSParticipant->getRTPSParticipantAttributes().throughputController.periodMillisecs != 0) { watt.mode = ASYNCHRONOUS_WRITER; } RTPSWriter* wout; if (mp_RTPSParticipant->createWriter(&wout, watt, writer_payload_pool_, mp_PDPWriterHistory, nullptr, c_EntityId_SPDPWriter, true)) { #if HAVE_SECURITY mp_RTPSParticipant->set_endpoint_rtps_protection_supports(wout, false); #endif // if HAVE_SECURITY mp_PDPWriter = wout; if (mp_PDPWriter != nullptr) { const NetworkFactory& network = mp_RTPSParticipant->network_factory(); LocatorList_t fixed_locators; Locator_t local_locator; for (const Locator_t& loc : mp_builtin->m_initialPeersList) { if (network.transform_remote_locator(loc, local_locator)) { fixed_locators.push_back(local_locator); } } dynamic_cast<StatelessWriter*>(wout)->set_fixed_locators(fixed_locators); } } else { logError(RTPS_PDP, "SimplePDP Writer creation failed"); delete mp_PDPWriterHistory; mp_PDPWriterHistory = nullptr; writer_payload_pool_->release_history(writer_pool_cfg, false); TopicPayloadPoolRegistry::release(writer_payload_pool_); return false; } logInfo(RTPS_PDP, "SPDP 
Endpoints creation finished"); return true; } void PDPSimple::assignRemoteEndpoints( ParticipantProxyData* pdata) { logInfo(RTPS_PDP, "For RTPSParticipant: " << pdata->m_guid.guidPrefix); const NetworkFactory& network = mp_RTPSParticipant->network_factory(); uint32_t endp = pdata->m_availableBuiltinEndpoints; uint32_t auxendp = endp; bool use_multicast_locators = !mp_RTPSParticipant->getAttributes().builtin.avoid_builtin_multicast || pdata->metatraffic_locators.unicast.empty(); auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_ANNOUNCER; if (auxendp != 0) { std::lock_guard<std::mutex> data_guard(temp_data_lock_); temp_writer_data_.clear(); temp_writer_data_.guid().guidPrefix = pdata->m_guid.guidPrefix; temp_writer_data_.guid().entityId = c_EntityId_SPDPWriter; temp_writer_data_.persistence_guid(pdata->get_persistence_guid()); temp_writer_data_.set_persistence_entity_id(c_EntityId_SPDPWriter); temp_writer_data_.set_remote_locators(pdata->metatraffic_locators, network, use_multicast_locators); temp_writer_data_.m_qos.m_reliability.kind = RELIABLE_RELIABILITY_QOS; temp_writer_data_.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; mp_PDPReader->matched_writer_add(temp_writer_data_); } auxendp = endp; auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_DETECTOR; if (auxendp != 0) { std::lock_guard<std::mutex> data_guard(temp_data_lock_); temp_reader_data_.clear(); temp_reader_data_.m_expectsInlineQos = false; temp_reader_data_.guid().guidPrefix = pdata->m_guid.guidPrefix; temp_reader_data_.guid().entityId = c_EntityId_SPDPReader; temp_reader_data_.set_remote_locators(pdata->metatraffic_locators, network, use_multicast_locators); temp_reader_data_.m_qos.m_reliability.kind = BEST_EFFORT_RELIABILITY_QOS; temp_reader_data_.m_qos.m_durability.kind = TRANSIENT_LOCAL_DURABILITY_QOS; mp_PDPWriter->matched_reader_add(temp_reader_data_); } #if HAVE_SECURITY // Validate remote participant mp_RTPSParticipant->security_manager().discovered_participant(*pdata); #else //Inform EDP of new RTPSParticipant data: notifyAboveRemoteEndpoints(*pdata); #endif // if HAVE_SECURITY } void PDPSimple::removeRemoteEndpoints( ParticipantProxyData* pdata) { logInfo(RTPS_PDP, "For RTPSParticipant: " << pdata->m_guid); uint32_t endp = pdata->m_availableBuiltinEndpoints; uint32_t auxendp = endp; auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_ANNOUNCER; if (auxendp != 0) { GUID_t writer_guid(pdata->m_guid.guidPrefix, c_EntityId_SPDPWriter); mp_PDPReader->matched_writer_remove(writer_guid); } auxendp = endp; auxendp &= DISC_BUILTIN_ENDPOINT_PARTICIPANT_DETECTOR; if (auxendp != 0) { GUID_t reader_guid(pdata->m_guid.guidPrefix, c_EntityId_SPDPReader); mp_PDPWriter->matched_reader_remove(reader_guid); } } void PDPSimple::notifyAboveRemoteEndpoints( const ParticipantProxyData& pdata) { //Inform EDP of new RTPSParticipant data: if (mp_EDP != nullptr) { mp_EDP->assignRemoteEndpoints(pdata); } if (mp_builtin->mp_WLP != nullptr) { mp_builtin->mp_WLP->assignRemoteEndpoints(pdata); } if (mp_builtin->tlm_ != nullptr) { mp_builtin->tlm_->assign_remote_endpoints(pdata); } } bool PDPSimple::newRemoteEndpointStaticallyDiscovered( const GUID_t& pguid, int16_t userDefinedId, EndpointKind_t kind) { string_255 pname; if (lookup_participant_name(pguid, pname)) { if (kind == WRITER) { dynamic_cast<EDPStatic*>(mp_EDP)->newRemoteWriter(pguid, pname, userDefinedId); } else { dynamic_cast<EDPStatic*>(mp_EDP)->newRemoteReader(pguid, pname, userDefinedId); } } return false; } } /* namespace rtps */ } /* namespace fastrtps */ } /* namespace eprosima */
1
21,519
Shouldn't we do a `reader_payload_pool_.reset()` here?
eProsima-Fast-DDS
cpp
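The reviewer's question above points at an ownership detail: the patch removes the static `TopicPayloadPoolRegistry::release(...)` call from the error path, and the reviewer asks whether the member handle should instead be reset so shared ownership handles destruction. Below is a minimal, self-contained C++ sketch of that pattern, not Fast-DDS code: `PayloadPool` is a hypothetical stand-in for the library's payload-pool type, and the snippet only illustrates releasing the history and then dropping the local `shared_ptr` reference via `reset()`, as the comment suggests.

#include <iostream>
#include <memory>

// Hypothetical stand-in for the Fast-DDS payload-pool type, for illustration only.
struct PayloadPool
{
    void release_history()
    {
        std::cout << "history released\n";
    }

    ~PayloadPool()
    {
        std::cout << "pool destroyed (last owner dropped its reference)\n";
    }
};

int main()
{
    // Stands in for the reader_payload_pool_ member in the error path above.
    std::shared_ptr<PayloadPool> reader_payload_pool = std::make_shared<PayloadPool>();

    // Error path: release the history first, as the original code does...
    reader_payload_pool->release_history();

    // ...then drop this owner's reference instead of calling a registry release.
    // If other owners (e.g. a registry) still hold the pool, it stays alive;
    // otherwise it is destroyed here.
    reader_payload_pool.reset();

    return 0;
}

Whether `reset()` alone is sufficient depends on whether the registry keeps its own strong reference to the pool, which is exactly the ambiguity the review comment raises.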
@@ -33,9 +33,9 @@ type Block struct { // ElectionProof is the vrf proof giving this block's miner authoring rights ElectionProof *crypto.ElectionProof - // DrandEntries contain the verifiable oracle randomness used to elect + // BeaconEntries contain the verifiable oracle randomness used to elect // this block's author leader - DrandEntries []*drand.Entry + BeaconEntries []*drand.Entry // PoStProofs are the winning post proofs PoStProofs []PoStProof `json:"PoStProofs"`
1
package block import ( "encoding/json" "fmt" "github.com/filecoin-project/go-address" "github.com/filecoin-project/specs-actors/actors/abi" fbig "github.com/filecoin-project/specs-actors/actors/abi/big" blocks "github.com/ipfs/go-block-format" cid "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" node "github.com/ipfs/go-ipld-format" "github.com/filecoin-project/go-filecoin/internal/pkg/constants" "github.com/filecoin-project/go-filecoin/internal/pkg/crypto" "github.com/filecoin-project/go-filecoin/internal/pkg/drand" e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid" "github.com/filecoin-project/go-filecoin/internal/pkg/encoding" ) // Block is a block in the blockchain. type Block struct { // control field for encoding struct as an array _ struct{} `cbor:",toarray"` // Miner is the address of the miner actor that mined this block. Miner address.Address `json:"miner"` // Ticket is the ticket submitted with this block. Ticket Ticket `json:"ticket"` // ElectionProof is the vrf proof giving this block's miner authoring rights ElectionProof *crypto.ElectionProof // DrandEntries contain the verifiable oracle randomness used to elect // this block's author leader DrandEntries []*drand.Entry // PoStProofs are the winning post proofs PoStProofs []PoStProof `json:"PoStProofs"` // Parents is the set of parents this block was based on. Typically one, // but can be several in the case where there were multiple winning ticket- // holders for an epoch. Parents TipSetKey `json:"parents"` // ParentWeight is the aggregate chain weight of the parent set. ParentWeight fbig.Int `json:"parentWeight"` // Height is the chain height of this block. Height abi.ChainEpoch `json:"height"` // StateRoot is the CID of the root of the state tree after application of the messages in the parent tipset // to the parent tipset's state root. StateRoot e.Cid `json:"stateRoot,omitempty"` // MessageReceipts is a list of receipts corresponding to the application of the messages in the parent tipset // to the parent tipset's state root (corresponding to this block's StateRoot). MessageReceipts e.Cid `json:"messageReceipts,omitempty"` // Messages is the set of messages included in this block Messages e.Cid `json:"messages,omitempty"` // The aggregate signature of all BLS signed messages in the block BLSAggregateSig *crypto.Signature `json:"blsAggregateSig"` // The timestamp, in seconds since the Unix epoch, at which this block was created. Timestamp uint64 `json:"timestamp"` // The signature of the miner's worker key over the block BlockSig *crypto.Signature `json:"blocksig"` // ForkSignaling is extra data used by miners to communicate ForkSignaling uint64 cachedCid cid.Cid cachedBytes []byte } // IndexMessagesField is the message field position in the encoded block const IndexMessagesField = 10 // IndexParentsField is the parents field position in the encoded block const IndexParentsField = 5 // Cid returns the content id of this block. func (b *Block) Cid() cid.Cid { if b.cachedCid == cid.Undef { if b.cachedBytes == nil { bytes, err := encoding.Encode(b) if err != nil { panic(err) } b.cachedBytes = bytes } c, err := constants.DefaultCidBuilder.Sum(b.cachedBytes) if err != nil { panic(err) } b.cachedCid = c } return b.cachedCid } // ToNode converts the Block to an IPLD node. 
func (b *Block) ToNode() node.Node { data, err := encoding.Encode(b) if err != nil { panic(err) } c, err := constants.DefaultCidBuilder.Sum(data) if err != nil { panic(err) } blk, err := blocks.NewBlockWithCid(data, c) if err != nil { panic(err) } node, err := cbor.DecodeBlock(blk) if err != nil { panic(err) } return node } func (b *Block) String() string { errStr := "(error encoding Block)" cid := b.Cid() js, err := json.MarshalIndent(b, "", " ") if err != nil { return errStr } return fmt.Sprintf("Block cid=[%v]: %s", cid, string(js)) } // DecodeBlock decodes raw cbor bytes into a Block. func DecodeBlock(b []byte) (*Block, error) { var out Block if err := encoding.Decode(b, &out); err != nil { return nil, err } out.cachedBytes = b return &out, nil } // Equals returns true if the Block is equal to other. func (b *Block) Equals(other *Block) bool { return b.Cid().Equals(other.Cid()) } // SignatureData returns the block's bytes with a null signature field for // signature creation and verification func (b *Block) SignatureData() []byte { tmp := &Block{ Miner: b.Miner, Ticket: b.Ticket, ElectionProof: b.ElectionProof, Parents: b.Parents, ParentWeight: b.ParentWeight, Height: b.Height, Messages: b.Messages, StateRoot: b.StateRoot, MessageReceipts: b.MessageReceipts, PoStProofs: b.PoStProofs, DrandEntries: b.DrandEntries, Timestamp: b.Timestamp, BLSAggregateSig: b.BLSAggregateSig, ForkSignaling: b.ForkSignaling, // BlockSig omitted } return tmp.ToNode().RawData() }
1
23,707
I'd prefer nomenclature that is less tightly coupled to DRAND throughout. I played with renaming the `drand` package to `beacon`, but it was too much noise for this change.
filecoin-project-venus
go