/*
* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
* Copyright (c) 2015-2017 Google, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Author: Chia-I Wu <olvaffe@gmail.com>
* Author: Chris Forbes <chrisf@ijw.co.nz>
* Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mike Stroyan <mike@LunarG.com>
* Author: Tobin Ehlis <tobine@google.com>
* Author: Tony Barbour <tony@LunarG.com>
* Author: Cody Northrop <cnorthrop@google.com>
* Author: Dave Houlton <daveh@lunarg.com>
* Author: Jeremy Kniager <jeremyk@lunarg.com>
*/
#ifdef ANDROID
#include "vulkan_wrapper.h"
#else
#define NOMINMAX
#include <vulkan/vulkan.h>
#endif
#include "layers/vk_device_profile_api_layer.h"
#if defined(ANDROID) && defined(VALIDATION_APK)
#include <android/log.h>
#include <android_native_app_glue.h>
#endif
#include "icd-spv.h"
#include "test_common.h"
#include "vk_layer_config.h"
#include "vk_format_utils.h"
#include "vk_validation_error_messages.h"
#include "vkrenderframework.h"
#include "vk_typemap_helper.h"
#include <algorithm>
#include <cmath>
#include <functional>
#include <limits>
#include <memory>
#include <unordered_set>
//--------------------------------------------------------------------------------------
// Mesh and VertexFormat Data
//--------------------------------------------------------------------------------------
const char *kSkipPrefix = " TEST SKIPPED:";
enum BsoFailSelect {
BsoFailNone,
BsoFailLineWidth,
BsoFailDepthBias,
BsoFailViewport,
BsoFailScissor,
BsoFailBlend,
BsoFailDepthBounds,
BsoFailStencilReadMask,
BsoFailStencilWriteMask,
BsoFailStencilReference,
BsoFailCmdClearAttachments,
BsoFailIndexBuffer,
BsoFailIndexBufferBadSize,
BsoFailIndexBufferBadOffset,
BsoFailIndexBufferBadMapSize,
BsoFailIndexBufferBadMapOffset
};
static const char bindStateVertShaderText[] =
"#version 450\n"
"vec2 vertices[3];\n"
"void main() {\n"
" vertices[0] = vec2(-1.0, -1.0);\n"
" vertices[1] = vec2( 1.0, -1.0);\n"
" vertices[2] = vec2( 0.0, 1.0);\n"
" gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);\n"
"}\n";
static const char bindStateFragShaderText[] =
"#version 450\n"
"\n"
"layout(location = 0) out vec4 uFragColor;\n"
"void main(){\n"
" uFragColor = vec4(0,1,0,1);\n"
"}\n";
// Static arrays helper
template <class ElementT, size_t array_size>
size_t size(ElementT (&)[array_size]) {
return array_size;
}
// Format search helper
VkFormat FindSupportedDepthStencilFormat(VkPhysicalDevice phy) {
VkFormat ds_formats[] = {VK_FORMAT_D16_UNORM_S8_UINT, VK_FORMAT_D24_UNORM_S8_UINT, VK_FORMAT_D32_SFLOAT_S8_UINT};
for (uint32_t i = 0; i < size(ds_formats); i++) {
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(phy, ds_formats[i], &format_props);
if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
return ds_formats[i];
}
}
return VK_FORMAT_UNDEFINED;
}
// Returns true if *any* of the requested features are available.
// The assumption is that the framework can successfully create an image as
// long as at least one of the feature bits is present (excepting VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT).
bool ImageFormatIsSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL,
VkFormatFeatureFlags features = ~VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) {
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(phy, format, &format_props);
VkFormatFeatureFlags phy_features =
(VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures);
return (0 != (phy_features & features));
}
// Returns true if format and *all* requested features are available.
bool ImageFormatAndFeaturesSupported(VkPhysicalDevice phy, VkFormat format, VkImageTiling tiling, VkFormatFeatureFlags features) {
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(phy, format, &format_props);
VkFormatFeatureFlags phy_features =
(VK_IMAGE_TILING_OPTIMAL == tiling ? format_props.optimalTilingFeatures : format_props.linearTilingFeatures);
return (features == (phy_features & features));
}
// Returns true if format and *all* requested features are available.
bool ImageFormatAndFeaturesSupported(const VkInstance inst, const VkPhysicalDevice phy, const VkImageCreateInfo info,
const VkFormatFeatureFlags features) {
// Verify physical device support of format features
if (!ImageFormatAndFeaturesSupported(phy, info.format, info.tiling, features)) {
return false;
}
// Verify that PhysDevImageFormatProp() also claims support for the specific usage
VkImageFormatProperties props;
VkResult err =
vkGetPhysicalDeviceImageFormatProperties(phy, info.format, info.imageType, info.tiling, info.usage, info.flags, &props);
if (VK_SUCCESS != err) {
return false;
}
#if 0 // Convinced this chunk doesn't currently add any additional info, but leaving in place because it may be
// necessary with future extensions
// Verify again using version 2, if supported, which *can* return more property data than the original...
// (It's not clear that this is any more definitive than using the original version - but no harm)
PFN_vkGetPhysicalDeviceImageFormatProperties2KHR p_GetPDIFP2KHR =
(PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)vkGetInstanceProcAddr(inst,
"vkGetPhysicalDeviceImageFormatProperties2KHR");
if (NULL != p_GetPDIFP2KHR) {
VkPhysicalDeviceImageFormatInfo2KHR fmt_info{};
fmt_info.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR;
fmt_info.pNext = nullptr;
fmt_info.format = info.format;
fmt_info.type = info.imageType;
fmt_info.tiling = info.tiling;
fmt_info.usage = info.usage;
fmt_info.flags = info.flags;
VkImageFormatProperties2KHR fmt_props = {};
fmt_props.sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR;
err = p_GetPDIFP2KHR(phy, &fmt_info, &fmt_props);
if (VK_SUCCESS != err) {
return false;
}
}
#endif
return true;
}
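// Illustrative sketch (not part of any test): these helpers are typically used to gate a test on
// format/usage support and skip it otherwise. "image_ci" is a hypothetical, fully filled-in
// VkImageCreateInfo belonging to the calling test:
//   if (!ImageFormatAndFeaturesSupported(instance(), gpu(), image_ci, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
//       printf("%s Required image format/features not supported; skipped.\n", kSkipPrefix);
//       return;
//   }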
// Validation report callback prototype
static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
void *pUserData);
// Simple sane SamplerCreateInfo boilerplate
static VkSamplerCreateInfo SafeSaneSamplerCreateInfo() {
VkSamplerCreateInfo sampler_create_info = {};
sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
sampler_create_info.pNext = nullptr;
sampler_create_info.magFilter = VK_FILTER_NEAREST;
sampler_create_info.minFilter = VK_FILTER_NEAREST;
sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sampler_create_info.mipLodBias = 1.0;
sampler_create_info.anisotropyEnable = VK_FALSE;
sampler_create_info.maxAnisotropy = 1.0;
sampler_create_info.compareEnable = VK_FALSE;
sampler_create_info.compareOp = VK_COMPARE_OP_NEVER;
sampler_create_info.minLod = 1.0;
sampler_create_info.maxLod = 1.0;
sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE;
sampler_create_info.unnormalizedCoordinates = VK_FALSE;
return sampler_create_info;
}
// Dependent "false" type for the static assert, as GCC will evaluate
// non-dependent static_asserts even for non-instantiated templates
template <typename T>
struct AlwaysFalse : std::false_type {};
// Helpers to get nearest greater or smaller value (of float) -- useful for testing the boundary cases of Vulkan limits
template <typename T>
T NearestGreater(const T from) {
using Lim = std::numeric_limits<T>;
const auto positive_direction = Lim::has_infinity ? Lim::infinity() : Lim::max();
return std::nextafter(from, positive_direction);
}
template <typename T>
T NearestSmaller(const T from) {
using Lim = std::numeric_limits<T>;
const auto negative_direction = Lim::has_infinity ? -Lim::infinity() : Lim::lowest();
return std::nextafter(from, negative_direction);
}
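// Illustrative sketch: these helpers are used to probe just outside a device limit, e.g. (assuming
// "limits" is the VkPhysicalDeviceLimits of the device under test):
//   float too_large = NearestGreater(limits.maxSamplerAnisotropy);  // first float above the limit
//   float too_small = NearestSmaller(1.0f);                         // first float below 1.0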
// ErrorMonitor Usage:
//
// Call SetDesiredFailureMsg with a string to be compared against all
// encountered log messages. Passing an empty string ("") will match any
// log message. CheckForDesiredMsg returns VK_TRUE (suppressing the message)
// only if the message matches one of the desired strings.
//
// Call VerifyFound to determine if all desired failure messages
// were encountered. Call VerifyNotFound to determine if any unexpected
// failure was encountered.
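//
// Typical negative-test pattern (illustrative sketch; the offending Vulkan call is hypothetical):
//   m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "expected error substring");
//   /* make the Vulkan call that should trigger the error */
//   m_errorMonitor->VerifyFound();
//
// Typical positive-test pattern:
//   m_errorMonitor->ExpectSuccess();
//   /* make the Vulkan call that should generate no errors */
//   m_errorMonitor->VerifyNotFound();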
class ErrorMonitor {
public:
ErrorMonitor() {
test_platform_thread_create_mutex(&mutex_);
test_platform_thread_lock_mutex(&mutex_);
Reset();
test_platform_thread_unlock_mutex(&mutex_);
}
~ErrorMonitor() { test_platform_thread_delete_mutex(&mutex_); }
// Set monitor to pristine state
void Reset() {
message_flags_ = VK_DEBUG_REPORT_ERROR_BIT_EXT;
bailout_ = NULL;
message_found_ = VK_FALSE;
failure_message_strings_.clear();
desired_message_strings_.clear();
ignore_message_strings_.clear();
other_messages_.clear();
message_outstanding_count_ = 0;
}
// ErrorMonitor will look for an error message containing the specified string(s)
void SetDesiredFailureMsg(const VkFlags msgFlags, const std::string msg) { SetDesiredFailureMsg(msgFlags, msg.c_str()); }
void SetDesiredFailureMsg(const VkFlags msgFlags, const char *const msgString) {
test_platform_thread_lock_mutex(&mutex_);
desired_message_strings_.insert(msgString);
message_flags_ |= msgFlags;
message_outstanding_count_++;
test_platform_thread_unlock_mutex(&mutex_);
}
// ErrorMonitor will look for an error message containing the specified string(s)
template <typename Iter>
void SetDesiredFailureMsg(const VkFlags msgFlags, Iter iter, const Iter end) {
for (; iter != end; ++iter) {
SetDesiredFailureMsg(msgFlags, *iter);
}
}
// Set an error that the error monitor will ignore. Do not use this function if you are creating a new test.
// TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
// function and its definition.
void SetUnexpectedError(const char *const msg) {
test_platform_thread_lock_mutex(&mutex_);
ignore_message_strings_.emplace_back(msg);
test_platform_thread_unlock_mutex(&mutex_);
}
VkBool32 CheckForDesiredMsg(const uint32_t message_code, const char *const msgString) {
VkBool32 result = VK_FALSE;
test_platform_thread_lock_mutex(&mutex_);
if (bailout_ != nullptr) {
*bailout_ = true;
}
string errorString(msgString);
bool found_expected = false;
if (!IgnoreMessage(errorString)) {
for (auto desired_msg : desired_message_strings_) {
if (desired_msg.length() == 0) {
// An empty desired_msg string "" indicates a positive test - not expecting an error.
// Return true to avoid calling layers/driver with this error.
// And don't erase the "" string, so it remains if another error is found.
result = VK_TRUE;
found_expected = true;
message_found_ = true;
failure_message_strings_.insert(errorString);
} else if (errorString.find(desired_msg) != string::npos) {
found_expected = true;
message_outstanding_count_--;
failure_message_strings_.insert(errorString);
message_found_ = true;
result = VK_TRUE;
// We only want one match for each expected error, so remove it from the set here.
// Since we're about to break out of the loop, it's safe to erase from the set we're iterating over.
desired_message_strings_.erase(desired_msg);
break;
}
}
if (!found_expected) {
printf("Unexpected: %s\n", msgString);
other_messages_.push_back(errorString);
}
}
test_platform_thread_unlock_mutex(&mutex_);
return result;
}
vector<string> GetOtherFailureMsgs() const { return other_messages_; }
VkDebugReportFlagsEXT GetMessageFlags() const { return message_flags_; }
bool AnyDesiredMsgFound() const { return message_found_; }
bool AllDesiredMsgsFound() const { return (0 == message_outstanding_count_); }
void SetBailout(bool *bailout) { bailout_ = bailout; }
void DumpFailureMsgs() const {
vector<string> otherMsgs = GetOtherFailureMsgs();
if (otherMsgs.size()) {
cout << "Other error messages logged for this test were:" << endl;
for (auto iter = otherMsgs.begin(); iter != otherMsgs.end(); iter++) {
cout << " " << *iter << endl;
}
}
}
// Helpers
// ExpectSuccess takes an optional argument allowing a custom combination of debug flags
void ExpectSuccess(VkDebugReportFlagsEXT const message_flag_mask = VK_DEBUG_REPORT_ERROR_BIT_EXT) {
// Match ANY message matching specified type
SetDesiredFailureMsg(message_flag_mask, "");
message_flags_ = message_flag_mask; // override mask handling in SetDesired...
}
void VerifyFound() {
// Not seeing the desired message is a failure. /Before/ throwing, dump any other messages.
if (!AllDesiredMsgsFound()) {
DumpFailureMsgs();
for (auto desired_msg : desired_message_strings_) {
ADD_FAILURE() << "Did not receive expected error '" << desired_msg << "'";
}
}
Reset();
}
void VerifyNotFound() {
// ExpectSuccess() configured us to match anything. Any error is a failure.
if (AnyDesiredMsgFound()) {
DumpFailureMsgs();
for (auto msg : failure_message_strings_) {
ADD_FAILURE() << "Expected to succeed but got error: " << msg;
}
}
Reset();
}
private:
// TODO: This is stopgap to block new unexpected errors from being introduced. The long-term goal is to remove the use of this
// function and its definition.
bool IgnoreMessage(std::string const &msg) const {
if (ignore_message_strings_.empty()) {
return false;
}
return std::find_if(ignore_message_strings_.begin(), ignore_message_strings_.end(), [&msg](std::string const &str) {
return msg.find(str) != std::string::npos;
}) != ignore_message_strings_.end();
}
VkFlags message_flags_;
std::unordered_set<string> desired_message_strings_;
std::unordered_set<string> failure_message_strings_;
std::vector<std::string> ignore_message_strings_;
vector<string> other_messages_;
test_platform_thread_mutex mutex_;
bool *bailout_;
bool message_found_;
int message_outstanding_count_;
};
static VKAPI_ATTR VkBool32 VKAPI_CALL myDbgFunc(VkFlags msgFlags, VkDebugReportObjectTypeEXT objType, uint64_t srcObject,
size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg,
void *pUserData) {
ErrorMonitor *errMonitor = (ErrorMonitor *)pUserData;
if (msgFlags & errMonitor->GetMessageFlags()) {
#ifdef _DEBUG
char embedded_code_string[2048];
snprintf(embedded_code_string, 2048, "%s [%08x]", pMsg, msgCode);
return errMonitor->CheckForDesiredMsg(msgCode, embedded_code_string);
#else
return errMonitor->CheckForDesiredMsg(msgCode, pMsg);
#endif
}
return VK_FALSE;
}
class VkLayerTest : public VkRenderFramework {
public:
void VKTriangleTest(BsoFailSelect failCase);
void GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj, VkDescriptorSetObj &descriptorSet,
BsoFailSelect failCase);
void Init(VkPhysicalDeviceFeatures *features = nullptr, VkPhysicalDeviceFeatures2 *features2 = nullptr,
const VkCommandPoolCreateFlags flags = 0) {
InitFramework(myDbgFunc, m_errorMonitor);
InitState(features, features2, flags);
}
protected:
ErrorMonitor *m_errorMonitor;
public:
ErrorMonitor *Monitor() { return m_errorMonitor; }
VkCommandBufferObj *CommandBuffer() { return m_commandBuffer; }
protected:
bool m_enableWSI;
virtual void SetUp() {
m_instance_layer_names.clear();
m_instance_extension_names.clear();
m_device_extension_names.clear();
// Add default instance extensions to the list
m_instance_extension_names.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
// Use Threading layer first to protect others from
// ThreadCommandBufferCollision test
m_instance_layer_names.push_back("VK_LAYER_GOOGLE_threading");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_parameter_validation");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_object_tracker");
m_instance_layer_names.push_back("VK_LAYER_LUNARG_core_validation");
m_instance_layer_names.push_back("VK_LAYER_GOOGLE_unique_objects");
if (VkTestFramework::m_devsim_layer) {
if (InstanceLayerSupported("VK_LAYER_LUNARG_device_simulation")) {
m_instance_layer_names.push_back("VK_LAYER_LUNARG_device_simulation");
} else {
VkTestFramework::m_devsim_layer = false;
printf(" Did not find VK_LAYER_LUNARG_device_simulation layer so it will not be enabled.\n");
}
}
if (m_enableWSI) {
m_instance_extension_names.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
#ifdef NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_ANDROID_KHR)
m_instance_extension_names.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_ANDROID_KHR
#if defined(VK_USE_PLATFORM_MIR_KHR)
m_instance_extension_names.push_back(VK_KHR_MIR_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_MIR_KHR
#if defined(VK_USE_PLATFORM_WAYLAND_KHR)
m_instance_extension_names.push_back(VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#if defined(VK_USE_PLATFORM_WIN32_KHR)
m_instance_extension_names.push_back(VK_KHR_WIN32_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_WIN32_KHR
#endif // NEED_TO_TEST_THIS_ON_PLATFORM
#if defined(VK_USE_PLATFORM_XCB_KHR)
m_instance_extension_names.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
#elif defined(VK_USE_PLATFORM_XLIB_KHR)
m_instance_extension_names.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
#endif // VK_USE_PLATFORM_XLIB_KHR
}
this->app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
this->app_info.pNext = NULL;
this->app_info.pApplicationName = "layer_tests";
this->app_info.applicationVersion = 1;
this->app_info.pEngineName = "unittest";
this->app_info.engineVersion = 1;
this->app_info.apiVersion = VK_API_VERSION_1_0;
m_errorMonitor = new ErrorMonitor;
}
bool LoadDeviceProfileLayer(
PFN_vkSetPhysicalDeviceFormatPropertiesEXT &fpvkSetPhysicalDeviceFormatPropertiesEXT,
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT &fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT) {
// Load required functions
fpvkSetPhysicalDeviceFormatPropertiesEXT =
(PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT");
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT =
(PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(
instance(), "vkGetOriginalPhysicalDeviceFormatPropertiesEXT");
if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
return false;
}
return true;
}
virtual void TearDown() {
// Clean up resources before we reset
ShutdownFramework();
delete m_errorMonitor;
}
VkLayerTest() { m_enableWSI = false; }
};
void VkLayerTest::VKTriangleTest(BsoFailSelect failCase) {
ASSERT_TRUE(m_device && m_device->initialized()); // VKTriangleTest assumes Init() has finished
ASSERT_NO_FATAL_FAILURE(InitViewport());
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj ps(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipelineobj(m_device);
pipelineobj.AddDefaultColorAttachment();
pipelineobj.AddShader(&vs);
pipelineobj.AddShader(&ps);
bool failcase_needs_depth = false; // to mark cases that need depth attachment
VkBufferObj index_buffer;
switch (failCase) {
case BsoFailLineWidth: {
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_LINE_WIDTH);
VkPipelineInputAssemblyStateCreateInfo ia_state = {};
ia_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
pipelineobj.SetInputAssembly(&ia_state);
break;
}
case BsoFailDepthBias: {
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BIAS);
VkPipelineRasterizationStateCreateInfo rs_state = {};
rs_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_state.depthBiasEnable = VK_TRUE;
rs_state.lineWidth = 1.0f;
pipelineobj.SetRasterization(&rs_state);
break;
}
case BsoFailViewport: {
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT);
break;
}
case BsoFailScissor: {
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR);
break;
}
case BsoFailBlend: {
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_BLEND_CONSTANTS);
VkPipelineColorBlendAttachmentState att_state = {};
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
att_state.blendEnable = VK_TRUE;
pipelineobj.AddColorAttachment(0, att_state);
break;
}
case BsoFailDepthBounds: {
failcase_needs_depth = true;
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_DEPTH_BOUNDS);
break;
}
case BsoFailStencilReadMask: {
failcase_needs_depth = true;
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK);
break;
}
case BsoFailStencilWriteMask: {
failcase_needs_depth = true;
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_WRITE_MASK);
break;
}
case BsoFailStencilReference: {
failcase_needs_depth = true;
pipelineobj.MakeDynamic(VK_DYNAMIC_STATE_STENCIL_REFERENCE);
break;
}
case BsoFailIndexBuffer:
break;
case BsoFailIndexBufferBadSize:
case BsoFailIndexBufferBadOffset:
case BsoFailIndexBufferBadMapSize:
case BsoFailIndexBufferBadMapOffset: {
// Create an index buffer for these tests.
// There is no need to populate it because we should bail before trying to draw.
uint32_t const indices[] = {0};
VkBufferCreateInfo buffer_info = {};
buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_info.size = 1024;
buffer_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
buffer_info.queueFamilyIndexCount = 1;
buffer_info.pQueueFamilyIndices = indices;
index_buffer.init(*m_device, buffer_info, (VkMemoryPropertyFlags)VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
} break;
case BsoFailCmdClearAttachments:
break;
case BsoFailNone:
break;
default:
break;
}
VkDescriptorSetObj descriptorSet(m_device);
VkImageView *depth_attachment = nullptr;
if (failcase_needs_depth) {
m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu());
ASSERT_TRUE(m_depth_stencil_fmt != VK_FORMAT_UNDEFINED);
m_depthStencil->Init(m_device, static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height), m_depth_stencil_fmt,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
depth_attachment = m_depthStencil->BindInfo();
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget(1, depth_attachment));
m_commandBuffer->begin();
GenericDrawPreparation(m_commandBuffer, pipelineobj, descriptorSet, failCase);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// render triangle
if (failCase == BsoFailIndexBuffer) {
// Use DrawIndexed w/o an index buffer bound
m_commandBuffer->DrawIndexed(3, 1, 0, 0, 0);
} else if (failCase == BsoFailIndexBufferBadSize) {
// Bind the index buffer and draw one too many indices
m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16);
m_commandBuffer->DrawIndexed(513, 1, 0, 0, 0);
} else if (failCase == BsoFailIndexBufferBadOffset) {
// Bind the index buffer and draw one past the end of the buffer using the offset
m_commandBuffer->BindIndexBuffer(&index_buffer, 0, VK_INDEX_TYPE_UINT16);
m_commandBuffer->DrawIndexed(512, 1, 1, 0, 0);
} else if (failCase == BsoFailIndexBufferBadMapSize) {
// Bind the index buffer at the middle point and draw one too many indices
m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16);
m_commandBuffer->DrawIndexed(257, 1, 0, 0, 0);
} else if (failCase == BsoFailIndexBufferBadMapOffset) {
// Bind the index buffer at the middle point and draw one past the end of the buffer
m_commandBuffer->BindIndexBuffer(&index_buffer, 512, VK_INDEX_TYPE_UINT16);
m_commandBuffer->DrawIndexed(256, 1, 1, 0, 0);
} else {
m_commandBuffer->Draw(3, 1, 0, 0);
}
if (failCase == BsoFailCmdClearAttachments) {
VkClearAttachment color_attachment = {};
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.colorAttachment = 1;  // Intentionally out of range; the only valid attachment index here is 0
VkClearRect clear_rect = {{{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}}, 0, 0};
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
}
// finalize recording of the command buffer
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(true);
DestroyRenderTarget();
}
void VkLayerTest::GenericDrawPreparation(VkCommandBufferObj *commandBuffer, VkPipelineObj &pipelineobj,
VkDescriptorSetObj &descriptorSet, BsoFailSelect failCase) {
commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, m_depthStencil, m_depth_clear_color, m_stencil_clear_color);
commandBuffer->PrepareAttachments(m_renderTargets, m_depthStencil);
// Make sure depthWriteEnable is set so that Depth fail test will work
// correctly
// Make sure stencilTestEnable is set so that Stencil fail test will work
// correctly
VkStencilOpState stencil = {};
stencil.failOp = VK_STENCIL_OP_KEEP;
stencil.passOp = VK_STENCIL_OP_KEEP;
stencil.depthFailOp = VK_STENCIL_OP_KEEP;
stencil.compareOp = VK_COMPARE_OP_NEVER;
VkPipelineDepthStencilStateCreateInfo ds_ci = {};
ds_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
ds_ci.pNext = NULL;
ds_ci.depthTestEnable = VK_FALSE;
ds_ci.depthWriteEnable = VK_TRUE;
ds_ci.depthCompareOp = VK_COMPARE_OP_NEVER;
ds_ci.depthBoundsTestEnable = VK_FALSE;
if (failCase == BsoFailDepthBounds) {
ds_ci.depthBoundsTestEnable = VK_TRUE;
ds_ci.maxDepthBounds = 0.0f;
ds_ci.minDepthBounds = 0.0f;
}
ds_ci.stencilTestEnable = VK_TRUE;
ds_ci.front = stencil;
ds_ci.back = stencil;
pipelineobj.SetDepthStencil(&ds_ci);
pipelineobj.SetViewport(m_viewports);
pipelineobj.SetScissor(m_scissors);
descriptorSet.CreateVKDescriptorSet(commandBuffer);
VkResult err = pipelineobj.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
ASSERT_VK_SUCCESS(err);
vkCmdBindPipeline(commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipelineobj.handle());
commandBuffer->BindDescriptorSet(descriptorSet);
}
class VkPositiveLayerTest : public VkLayerTest {
public:
protected:
};
class VkWsiEnabledLayerTest : public VkLayerTest {
public:
protected:
VkWsiEnabledLayerTest() { m_enableWSI = true; }
};
class VkBufferTest {
public:
enum eTestEnFlags {
eDoubleDelete,
eInvalidDeviceOffset,
eInvalidMemoryOffset,
eBindNullBuffer,
eBindFakeBuffer,
eFreeInvalidHandle,
eNone,
};
enum eTestConditions { eOffsetAlignment = 1 };
static bool GetTestConditionValid(VkDeviceObj *aVulkanDevice, eTestEnFlags aTestFlag, VkBufferUsageFlags aBufferUsage = 0) {
if (eInvalidDeviceOffset != aTestFlag && eInvalidMemoryOffset != aTestFlag) {
return true;
}
VkDeviceSize offset_limit = 0;
if (eInvalidMemoryOffset == aTestFlag) {
VkBuffer vulkanBuffer;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 32;
buffer_create_info.usage = aBufferUsage;
vkCreateBuffer(aVulkanDevice->device(), &buffer_create_info, nullptr, &vulkanBuffer);
VkMemoryRequirements memory_reqs = {};
vkGetBufferMemoryRequirements(aVulkanDevice->device(), vulkanBuffer, &memory_reqs);
vkDestroyBuffer(aVulkanDevice->device(), vulkanBuffer, nullptr);
offset_limit = memory_reqs.alignment;
} else if ((VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) & aBufferUsage) {
offset_limit = aVulkanDevice->props.limits.minTexelBufferOffsetAlignment;
} else if (VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT & aBufferUsage) {
offset_limit = aVulkanDevice->props.limits.minUniformBufferOffsetAlignment;
} else if (VK_BUFFER_USAGE_STORAGE_BUFFER_BIT & aBufferUsage) {
offset_limit = aVulkanDevice->props.limits.minStorageBufferOffsetAlignment;
}
return eOffsetAlignment < offset_limit;
}
// A constructor which performs validation tests within construction.
VkBufferTest(VkDeviceObj *aVulkanDevice, VkBufferUsageFlags aBufferUsage, eTestEnFlags aTestFlag = eNone)
: AllocateCurrent(true),
BoundCurrent(false),
CreateCurrent(false),
InvalidDeleteEn(false),
VulkanDevice(aVulkanDevice->device()) {
if (eBindNullBuffer == aTestFlag || eBindFakeBuffer == aTestFlag) {
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = 1; // fake size -- shouldn't matter for the test
memory_allocate_info.memoryTypeIndex = 0; // fake type -- shouldn't matter for the test
vkAllocateMemory(VulkanDevice, &memory_allocate_info, nullptr, &VulkanMemory);
VulkanBuffer = (aTestFlag == eBindNullBuffer) ? VK_NULL_HANDLE : (VkBuffer)0xCDCDCDCDCDCDCDCD;
vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, 0);
} else {
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 32;
buffer_create_info.usage = aBufferUsage;
vkCreateBuffer(VulkanDevice, &buffer_create_info, nullptr, &VulkanBuffer);
CreateCurrent = true;
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(VulkanDevice, VulkanBuffer, &memory_requirements);
VkMemoryAllocateInfo memory_allocate_info = {};
memory_allocate_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_allocate_info.allocationSize = memory_requirements.size + eOffsetAlignment;
bool pass = aVulkanDevice->phy().set_memory_type(memory_requirements.memoryTypeBits, &memory_allocate_info,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
CreateCurrent = false;
vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
return;
}
vkAllocateMemory(VulkanDevice, &memory_allocate_info, NULL, &VulkanMemory);
// NB: 1 is intentionally an invalid offset value
const bool offset_en = eInvalidDeviceOffset == aTestFlag || eInvalidMemoryOffset == aTestFlag;
vkBindBufferMemory(VulkanDevice, VulkanBuffer, VulkanMemory, offset_en ? eOffsetAlignment : 0);
BoundCurrent = true;
InvalidDeleteEn = (eFreeInvalidHandle == aTestFlag);
}
}
~VkBufferTest() {
if (CreateCurrent) {
vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
}
if (AllocateCurrent) {
if (InvalidDeleteEn) {
union {
VkDeviceMemory device_memory;
unsigned long long index_access;
} bad_index;
bad_index.device_memory = VulkanMemory;
bad_index.index_access++;
vkFreeMemory(VulkanDevice, bad_index.device_memory, nullptr);
}
vkFreeMemory(VulkanDevice, VulkanMemory, nullptr);
}
}
bool GetBufferCurrent() { return AllocateCurrent && BoundCurrent && CreateCurrent; }
const VkBuffer &GetBuffer() { return VulkanBuffer; }
void TestDoubleDestroy() {
// Destroy the buffer but leave the flag set, which will cause
// the buffer to be destroyed again in the destructor.
vkDestroyBuffer(VulkanDevice, VulkanBuffer, nullptr);
}
protected:
bool AllocateCurrent;
bool BoundCurrent;
bool CreateCurrent;
bool InvalidDeleteEn;
VkBuffer VulkanBuffer;
VkDevice VulkanDevice;
VkDeviceMemory VulkanMemory;
};
class VkVerticesObj {
public:
VkVerticesObj(VkDeviceObj *aVulkanDevice, unsigned aAttributeCount, unsigned aBindingCount, unsigned aByteStride,
VkDeviceSize aVertexCount, const float *aVerticies)
: BoundCurrent(false),
AttributeCount(aAttributeCount),
BindingCount(aBindingCount),
BindId(BindIdGenerator),
PipelineVertexInputStateCreateInfo(),
VulkanMemoryBuffer(aVulkanDevice, static_cast<int>(aByteStride * aVertexCount),
reinterpret_cast<const void *>(aVerticies), VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) {
BindIdGenerator++; // NB: This can wrap w/misuse
VertexInputAttributeDescription = new VkVertexInputAttributeDescription[AttributeCount];
VertexInputBindingDescription = new VkVertexInputBindingDescription[BindingCount];
PipelineVertexInputStateCreateInfo.pVertexAttributeDescriptions = VertexInputAttributeDescription;
PipelineVertexInputStateCreateInfo.vertexAttributeDescriptionCount = AttributeCount;
PipelineVertexInputStateCreateInfo.pVertexBindingDescriptions = VertexInputBindingDescription;
PipelineVertexInputStateCreateInfo.vertexBindingDescriptionCount = BindingCount;
PipelineVertexInputStateCreateInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
unsigned i = 0;
do {
VertexInputAttributeDescription[i].binding = BindId;
VertexInputAttributeDescription[i].location = i;
VertexInputAttributeDescription[i].format = VK_FORMAT_R32G32B32_SFLOAT;
VertexInputAttributeDescription[i].offset = sizeof(float) * aByteStride;
i++;
} while (i < AttributeCount);
i = 0;
do {
VertexInputBindingDescription[i].binding = BindId;
VertexInputBindingDescription[i].stride = aByteStride;
VertexInputBindingDescription[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
i++;
} while (i < BindingCount);
}
~VkVerticesObj() {
if (VertexInputAttributeDescription) {
delete[] VertexInputAttributeDescription;
}
if (VertexInputBindingDescription) {
delete[] VertexInputBindingDescription;
}
}
bool AddVertexInputToPipe(VkPipelineObj &aPipelineObj) {
aPipelineObj.AddVertexInputAttribs(VertexInputAttributeDescription, AttributeCount);
aPipelineObj.AddVertexInputBindings(VertexInputBindingDescription, BindingCount);
return true;
}
void BindVertexBuffers(VkCommandBuffer aCommandBuffer, unsigned aOffsetCount = 0, VkDeviceSize *aOffsetList = nullptr) {
VkDeviceSize *offsetList;
unsigned offsetCount;
if (aOffsetCount) {
offsetList = aOffsetList;
offsetCount = aOffsetCount;
} else {
offsetList = new VkDeviceSize[1]();
offsetCount = 1;
}
vkCmdBindVertexBuffers(aCommandBuffer, BindId, offsetCount, &VulkanMemoryBuffer.handle(), offsetList);
BoundCurrent = true;
if (!aOffsetCount) {
delete[] offsetList;
}
}
protected:
static uint32_t BindIdGenerator;
bool BoundCurrent;
unsigned AttributeCount;
unsigned BindingCount;
uint32_t BindId;
VkPipelineVertexInputStateCreateInfo PipelineVertexInputStateCreateInfo;
VkVertexInputAttributeDescription *VertexInputAttributeDescription;
VkVertexInputBindingDescription *VertexInputBindingDescription;
VkConstantBufferObj VulkanMemoryBuffer;
};
uint32_t VkVerticesObj::BindIdGenerator;
struct OneOffDescriptorSet {
VkDeviceObj *device_;
VkDescriptorPool pool_;
VkDescriptorSetLayoutObj layout_;
VkDescriptorSet set_;
typedef std::vector<VkDescriptorSetLayoutBinding> Bindings;
OneOffDescriptorSet(VkDeviceObj *device, const Bindings &bindings)
: device_{device}, pool_{}, layout_(device, bindings), set_{} {
VkResult err;
std::vector<VkDescriptorPoolSize> sizes;
for (const auto &b : bindings) sizes.push_back({b.descriptorType, std::max(1u, b.descriptorCount)});
VkDescriptorPoolCreateInfo dspci = {
VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO, nullptr, 0, 1, uint32_t(sizes.size()), sizes.data()};
err = vkCreateDescriptorPool(device_->handle(), &dspci, nullptr, &pool_);
if (err != VK_SUCCESS) return;
VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, nullptr, pool_, 1,
&layout_.handle()};
err = vkAllocateDescriptorSets(device_->handle(), &alloc_info, &set_);
}
~OneOffDescriptorSet() {
// No need to destroy the set; it is freed along with the pool.
vkDestroyDescriptorPool(device_->handle(), pool_, nullptr);
}
bool Initialized() { return pool_ != VK_NULL_HANDLE && layout_.initialized() && set_ != VK_NULL_HANDLE; }
};
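// Illustrative sketch: a minimal single-binding descriptor set as commonly built in the tests
// below (binding 0, one uniform buffer, visible to all stages):
//   OneOffDescriptorSet ds(m_device, {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}});
//   ASSERT_TRUE(ds.Initialized());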
template <typename T>
bool IsValidVkStruct(const T &s) {
return LvlTypeMap<T>::kSType == s.sType;
}
// Helper class for tersely creating create pipeline tests
//
// Designed with minimal error checking to ensure easy error state creation
// See OneshotTest for typical usage
struct CreatePipelineHelper {
public:
std::vector<VkDescriptorSetLayoutBinding> dsl_bindings_;
std::unique_ptr<OneOffDescriptorSet> descriptor_set_;
std::vector<VkPipelineShaderStageCreateInfo> shader_stages_;
VkPipelineVertexInputStateCreateInfo vi_ci_ = {};
VkPipelineInputAssemblyStateCreateInfo ia_ci_ = {};
VkPipelineTessellationStateCreateInfo tess_ci_ = {};
VkViewport viewport_ = {};
VkRect2D scissor_ = {};
VkPipelineViewportStateCreateInfo vp_state_ci_ = {};
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci_ = {};
VkPipelineLayoutCreateInfo pipeline_layout_ci_ = {};
VkPipelineLayoutObj pipeline_layout_;
VkPipelineDynamicStateCreateInfo dyn_state_ci_ = {};
VkPipelineRasterizationStateCreateInfo rs_state_ci_ = {};
VkPipelineColorBlendAttachmentState cb_attachments_ = {};
VkPipelineColorBlendStateCreateInfo cb_ci_ = {};
VkGraphicsPipelineCreateInfo gp_ci_ = {};
VkPipelineCacheCreateInfo pc_ci_ = {};
VkPipeline pipeline_ = VK_NULL_HANDLE;
VkPipelineCache pipeline_cache_ = VK_NULL_HANDLE;
std::unique_ptr<VkShaderObj> vs_;
std::unique_ptr<VkShaderObj> fs_;
VkLayerTest &layer_test_;
CreatePipelineHelper(VkLayerTest &test) : layer_test_(test) {}
~CreatePipelineHelper() {
VkDevice device = layer_test_.device();
vkDestroyPipelineCache(device, pipeline_cache_, nullptr);
vkDestroyPipeline(device, pipeline_, nullptr);
}
void InitDescriptorSetInfo() { dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }
void InitInputAndVertexInfo() {
vi_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
ia_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
}
void InitMultisampleInfo() {
pipe_ms_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci_.pNext = nullptr;
pipe_ms_state_ci_.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipe_ms_state_ci_.sampleShadingEnable = VK_FALSE;
pipe_ms_state_ci_.minSampleShading = 1.0;
pipe_ms_state_ci_.pSampleMask = NULL;
}
void InitPipelineLayoutInfo() {
pipeline_layout_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci_.setLayoutCount = 1;     // Not really changeable because InitState() sets exactly one pSetLayout
pipeline_layout_ci_.pSetLayouts = nullptr;  // must be bound after the descriptor set layout is created
}
void InitViewportInfo() {
viewport_ = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
scissor_ = {{0, 0}, {64, 64}};
vp_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp_state_ci_.pNext = nullptr;
vp_state_ci_.viewportCount = 1;
vp_state_ci_.pViewports = &viewport_; // ignored if dynamic
vp_state_ci_.scissorCount = 1;
vp_state_ci_.pScissors = &scissor_; // ignored if dynamic
}
void InitDynamicStateInfo() {
// Use a "validity" check on the {} initialized structure to detect initialization
// during late bind
}
void InitShaderInfo() {
vs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, &layer_test_));
fs_.reset(new VkShaderObj(layer_test_.DeviceObj(), bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, &layer_test_));
// We shouldn't need a fragment shader but add it to be able to run on more devices
shader_stages_ = {vs_->GetStageCreateInfo(), fs_->GetStageCreateInfo()};
}
void InitRasterizationInfo() {
rs_state_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_state_ci_.pNext = nullptr;
rs_state_ci_.flags = 0;
rs_state_ci_.depthClampEnable = VK_FALSE;
rs_state_ci_.rasterizerDiscardEnable = VK_FALSE;
rs_state_ci_.polygonMode = VK_POLYGON_MODE_FILL;
rs_state_ci_.cullMode = VK_CULL_MODE_BACK_BIT;
rs_state_ci_.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rs_state_ci_.depthBiasEnable = VK_FALSE;
rs_state_ci_.lineWidth = 1.0F;
}
void InitBlendStateInfo() {
cb_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
cb_ci_.logicOpEnable = VK_FALSE;
cb_ci_.logicOp = VK_LOGIC_OP_COPY; // ignored if enable is VK_FALSE above
ASSERT_TRUE(IsValidVkStruct(layer_test_.RenderPassInfo()));
cb_ci_.attachmentCount = layer_test_.RenderPassInfo().subpassCount;
cb_ci_.pAttachments = &cb_attachments_;
for (int i = 0; i < 4; i++) {
cb_ci_.blendConstants[i] = 1.0F;
}
}
void InitGraphicsPipelineInfo() {
// Color-only rendering in a subpass with no depth/stencil attachment
// Active Pipeline Shader Stages
// Vertex Shader
// Fragment Shader
// Required: Fixed-Function Pipeline Stages
// VkPipelineVertexInputStateCreateInfo
// VkPipelineInputAssemblyStateCreateInfo
// VkPipelineViewportStateCreateInfo
// VkPipelineRasterizationStateCreateInfo
// VkPipelineMultisampleStateCreateInfo
// VkPipelineColorBlendStateCreateInfo
gp_ci_.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
gp_ci_.pNext = nullptr;
gp_ci_.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
gp_ci_.pVertexInputState = &vi_ci_;
gp_ci_.pInputAssemblyState = &ia_ci_;
gp_ci_.pTessellationState = nullptr;
gp_ci_.pViewportState = &vp_state_ci_;
gp_ci_.pRasterizationState = &rs_state_ci_;
gp_ci_.pMultisampleState = &pipe_ms_state_ci_;
gp_ci_.pDepthStencilState = nullptr;
gp_ci_.pColorBlendState = &cb_ci_;
gp_ci_.pDynamicState = nullptr;
gp_ci_.renderPass = layer_test_.renderPass();
}
void InitPipelineCacheInfo() {
pc_ci_.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
pc_ci_.pNext = nullptr;
pc_ci_.flags = 0;
pc_ci_.initialDataSize = 0;
pc_ci_.pInitialData = nullptr;
}
// Not called by default during init_info
void InitTesselationState() {
// TBD -- add shaders and create_info
}
// TBD -- add control for optional and/or additional initialization
void InitInfo() {
InitDescriptorSetInfo();
InitInputAndVertexInfo();
InitMultisampleInfo();
InitPipelineLayoutInfo();
InitViewportInfo();
InitDynamicStateInfo();
InitShaderInfo();
InitRasterizationInfo();
InitBlendStateInfo();
InitGraphicsPipelineInfo();
InitPipelineCacheInfo();
}
void InitState() {
VkResult err;
descriptor_set_.reset(new OneOffDescriptorSet(layer_test_.DeviceObj(), dsl_bindings_));
ASSERT_TRUE(descriptor_set_->Initialized());
const std::vector<VkPushConstantRange> push_ranges(
pipeline_layout_ci_.pPushConstantRanges,
pipeline_layout_ci_.pPushConstantRanges + pipeline_layout_ci_.pushConstantRangeCount);
pipeline_layout_ = VkPipelineLayoutObj(layer_test_.DeviceObj(), {&descriptor_set_->layout_}, push_ranges);
err = vkCreatePipelineCache(layer_test_.device(), &pc_ci_, NULL, &pipeline_cache_);
ASSERT_VK_SUCCESS(err);
}
void LateBindPipelineInfo() {
// Members held by value or whose addresses may change must be bound late, just before pipeline creation
gp_ci_.layout = pipeline_layout_.handle();
gp_ci_.stageCount = shader_stages_.size();
gp_ci_.pStages = shader_stages_.data();
if ((gp_ci_.pTessellationState == nullptr) && IsValidVkStruct(tess_ci_)) {
gp_ci_.pTessellationState = &tess_ci_;
}
if ((gp_ci_.pDynamicState == nullptr) && IsValidVkStruct(dyn_state_ci_)) {
gp_ci_.pDynamicState = &dyn_state_ci_;
}
}
VkResult CreateGraphicsPipeline(bool implicit_destroy = true, bool do_late_bind = true) {
VkResult err;
if (do_late_bind) {
LateBindPipelineInfo();
}
if (implicit_destroy && (pipeline_ != VK_NULL_HANDLE)) {
vkDestroyPipeline(layer_test_.device(), pipeline_, nullptr);
pipeline_ = VK_NULL_HANDLE;
}
err = vkCreateGraphicsPipelines(layer_test_.device(), pipeline_cache_, 1, &gp_ci_, NULL, &pipeline_);
return err;
}
// Helper function to create a simple test case (positive or negative)
//
// info_override can be any callable that takes a CreatePipelineHelper &.
// flags and errors can be any arguments accepted by SetDesiredFailureMsg.
template <typename Test, typename OverrideFunc, typename Error>
static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, const std::vector<Error> &errors,
bool positive_test = false) {
CreatePipelineHelper helper(test);
helper.InitInfo();
info_override(helper);
helper.InitState();
for (const auto &error : errors) test.Monitor()->SetDesiredFailureMsg(flags, error);
helper.CreateGraphicsPipeline();
if (positive_test) {
test.Monitor()->VerifyNotFound();
} else {
test.Monitor()->VerifyFound();
}
}
template <typename Test, typename OverrideFunc, typename Error>
static void OneshotTest(Test &test, OverrideFunc &info_override, const VkFlags flags, Error error, bool positive_test = false) {
OneshotTest(test, info_override, flags, std::vector<Error>(1, error), positive_test);
}
};
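// Illustrative sketch: how OneshotTest is typically driven from a test body. The lambda below is
// hypothetical and simply perturbs otherwise-valid pipeline state to provoke one specific error:
//   const auto break_line_width = [](CreatePipelineHelper &helper) {
//       helper.rs_state_ci_.lineWidth = -1.0f;  // hypothetical bad value, for illustration only
//   };
//   CreatePipelineHelper::OneshotTest(*this, break_line_width, VK_DEBUG_REPORT_ERROR_BIT_EXT,
//                                     "expected error substring");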
namespace chain_util {
template <typename T>
T Init(const void *pnext_in = nullptr) {
T pnext_obj = {};
pnext_obj.sType = LvlTypeMap<T>::kSType;
pnext_obj.pNext = pnext_in;
return pnext_obj;
}
class ExtensionChain {
const void *head_ = nullptr;
typedef std::function<bool(const char *)> AddIfFunction;
AddIfFunction add_if_;
typedef std::vector<const char *> List;
List *list_;
public:
template <typename F>
ExtensionChain(F &add_if, List *list) : add_if_(add_if), list_(list) {}
template <typename T>
void Add(const char *name, T &obj) {
if (add_if_(name)) {
if (list_) {
list_->push_back(name);
}
obj.pNext = head_;
head_ = &obj;
}
}
const void *Head() const { return head_; }
};
} // namespace chain_util
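// Illustrative sketch: chain_util is used to build a pNext chain of feature structs only for
// extensions that are actually supported. The names below ("extension_is_supported", "features2")
// are hypothetical:
//   chain_util::ExtensionChain chain(extension_is_supported, &m_device_extension_names);
//   auto storage_16bit = chain_util::Init<VkPhysicalDevice16BitStorageFeaturesKHR>();
//   chain.Add(VK_KHR_16BIT_STORAGE_EXTENSION_NAME, storage_16bit);
//   features2.pNext = chain.Head();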
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************
// ********************************************************************************************************************
TEST_F(VkLayerTest, RequiredParameter) {
TEST_DESCRIPTION("Specify VK_NULL_HANDLE, NULL, and 0 for required handle, pointer, array, and array count parameters");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pFeatures specified as NULL");
// Specify NULL for a pointer to a handle
// Expected to trigger an error with
// parameter_validation::validate_required_pointer
vkGetPhysicalDeviceFeatures(gpu(), NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"required parameter pQueueFamilyPropertyCount specified as NULL");
// Specify NULL for pointer to array count
// Expected to trigger an error with parameter_validation::validate_array
vkGetPhysicalDeviceQueueFamilyProperties(gpu(), NULL, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
// Specify 0 for a required array count
// Expected to trigger an error with parameter_validation::validate_array
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_commandBuffer->SetViewport(0, 0, &viewport);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
// Specify NULL for a required array
// Expected to trigger an error with parameter_validation::validate_array
m_commandBuffer->SetViewport(0, 1, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter memory specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle
// Expected to trigger an error with
// parameter_validation::validate_required_handle
vkUnmapMemory(device(), VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"required parameter pFences[0] specified as VK_NULL_HANDLE");
// Specify VK_NULL_HANDLE for a required handle array entry
// Expected to trigger an error with
// parameter_validation::validate_required_handle_array
VkFence fence = VK_NULL_HANDLE;
vkResetFences(device(), 1, &fence);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "required parameter pAllocateInfo specified as NULL");
// Specify NULL for a required struct pointer
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkDeviceMemory memory = VK_NULL_HANDLE;
vkAllocateMemory(device(), NULL, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of faceMask must not be 0");
// Specify 0 for a required VkFlags parameter
// Expected to trigger an error with parameter_validation::validate_flags
m_commandBuffer->SetStencilReference(0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "value of pSubmits[0].pWaitDstStageMask[0] must not be 0");
// Specify 0 for a required VkFlags array entry
// Expected to trigger an error with
// parameter_validation::validate_flags_array
VkSemaphore semaphore = VK_NULL_HANDLE;
VkPipelineStageFlags stageFlags = 0;
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.waitSemaphoreCount = 1;
submitInfo.pWaitSemaphores = &semaphore;
submitInfo.pWaitDstStageMask = &stageFlags;
vkQueueSubmit(m_device->m_queue, 1, &submitInfo, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ReservedParameter) {
TEST_DESCRIPTION("Specify a non-zero value for a reserved parameter");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " must be 0");
// Specify a non-zero value for a reserved VkFlags parameter
// Expected to trigger an error with
// parameter_validation::validate_reserved_flags
VkEvent event_handle = VK_NULL_HANDLE;
VkEventCreateInfo event_info = {};
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_info.flags = 1;
vkCreateEvent(device(), &event_info, NULL, &event_handle);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DebugMarkerNameTest) {
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_DEBUG_MARKER_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DEBUG_MARKER_EXTENSION_NAME);
} else {
printf("%s Debug Marker Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkDebugMarkerSetObjectNameEXT fpvkDebugMarkerSetObjectNameEXT =
(PFN_vkDebugMarkerSetObjectNameEXT)vkGetInstanceProcAddr(instance(), "vkDebugMarkerSetObjectNameEXT");
if (!(fpvkDebugMarkerSetObjectNameEXT)) {
printf("%s Can't find fpvkDebugMarkerSetObjectNameEXT; skipped.\n", kSkipPrefix);
return;
}
VkEvent event_handle = VK_NULL_HANDLE;
VkEventCreateInfo event_info = {};
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(device(), &event_info, NULL, &event_handle);
VkDebugMarkerObjectNameInfoEXT name_info = {};
name_info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
name_info.pNext = nullptr;
name_info.object = (uint64_t)event_handle;
name_info.objectType = VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT;
name_info.pObjectName = "UnimaginablyImprobableString";
fpvkDebugMarkerSetObjectNameEXT(device(), &name_info);
m_commandBuffer->begin();
vkCmdSetEvent(m_commandBuffer->handle(), event_handle, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "UnimaginablyImprobableString");
vkDestroyEvent(m_device->device(), event_handle, NULL);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
}
TEST_F(VkLayerTest, InvalidStructSType) {
TEST_DESCRIPTION("Specify an invalid VkStructureType for a Vulkan structure's sType field");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pAllocateInfo->sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type
VkMemoryAllocateInfo alloc_info = {};
VkDeviceMemory memory = VK_NULL_HANDLE;
vkAllocateMemory(device(), &alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "parameter pSubmits[0].sType must be");
// Zero struct memory, effectively setting sType to
// VK_STRUCTURE_TYPE_APPLICATION_INFO
// Expected to trigger an error with
// parameter_validation::validate_struct_type_array
VkSubmitInfo submit_info = {};
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidStructPNext) {
TEST_DESCRIPTION("Specify an invalid value for a Vulkan structure's pNext field");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "value of pCreateInfo->pNext must be NULL");
// Set VkEventCreateInfo::pNext to a non-NULL value, when pNext must be NULL.
// Need to pick a function that has no allowed pNext structure types.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkEvent event = VK_NULL_HANDLE;
VkEventCreateInfo event_alloc_info = {};
// Zero-initialization will provide the correct sType
VkApplicationInfo app_info = {};
event_alloc_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
event_alloc_info.pNext = &app_info;
vkCreateEvent(device(), &event_alloc_info, NULL, &event);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
" chain includes a structure with unexpected VkStructureType ");
// Set VkMemoryAllocateInfo::pNext to a non-NULL value, but use
// a function that has allowed pNext structure types and specify
// a structure type that is not allowed.
// Expected to trigger an error with parameter_validation::validate_struct_pnext
VkDeviceMemory memory = VK_NULL_HANDLE;
VkMemoryAllocateInfo memory_alloc_info = {};
memory_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_alloc_info.pNext = &app_info;
vkAllocateMemory(device(), &memory_alloc_info, NULL, &memory);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueOutOfRange) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"does not fall within the begin..end range of the core VkFormat enumeration tokens");
// Specify an invalid VkFormat value
// Expected to trigger an error with
// parameter_validation::validate_ranged_enum
VkFormatProperties format_properties;
vkGetPhysicalDeviceFormatProperties(gpu(), static_cast<VkFormat>(8000), &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadMask) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags bitmask value
// Expected to trigger an error with parameter_validation::validate_flags
VkImageFormatProperties image_format_properties;
vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
static_cast<VkImageUsageFlags>(1 << 25), 0, &image_format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadFlag) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "contains flag bits that are not recognized members of");
// Specify an invalid VkFlags array entry
// Expected to trigger an error with
// parameter_validation::validate_flags_array
VkSemaphore semaphore = VK_NULL_HANDLE;
VkPipelineStageFlags stage_flags = static_cast<VkPipelineStageFlags>(1 << 25);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = &stage_flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UnrecognizedValueBadBool) {
// Make sure using VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE doesn't trigger a false positive.
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME);
} else {
printf("%s VK_KHR_sampler_mirror_clamp_to_edge extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is neither VK_TRUE nor VK_FALSE");
// Specify an invalid VkBool32 value, expecting a warning with parameter_validation::validate_bool32
VkSampler sampler = VK_NULL_HANDLE;
VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
// Not VK_TRUE or VK_FALSE
sampler_info.anisotropyEnable = 3;
vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, MirrorClampToEdgeNotEnabled) {
TEST_DESCRIPTION("Validation should catch using CLAMP_TO_EDGE addressing mode if the extension is not enabled.");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-addressModeU-01079");
VkSampler sampler = VK_NULL_HANDLE;
VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
// Set the modes to cause the error
sampler_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE;
vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AnisotropyFeatureDisabled) {
TEST_DESCRIPTION("Validation should check anisotropy parameters are correct with samplerAnisotropy disabled.");
// Determine if required device features are available
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
device_features.samplerAnisotropy = VK_FALSE; // force anisotropy off
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSamplerCreateInfo-anisotropyEnable-01070");
VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo();
// With samplerAnisotropy disabled, the sampler must not enable anisotropy.
sampler_info.anisotropyEnable = VK_TRUE;
VkSampler sampler = VK_NULL_HANDLE;
VkResult err;
err = vkCreateSampler(m_device->device(), &sampler_info, NULL, &sampler);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == err) {
vkDestroySampler(m_device->device(), sampler, NULL);
}
sampler = VK_NULL_HANDLE;
}
TEST_F(VkLayerTest, AnisotropyFeatureEnabled) {
TEST_DESCRIPTION("Validation must check several conditions that apply only when Anisotropy is enabled.");
// Determine if required device features are available
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
// These tests require the samplerAnisotropy feature
if (VK_TRUE != device_features.samplerAnisotropy) {
printf("%s Test requires unsupported samplerAnisotropy feature. Skipped.\n", kSkipPrefix);
return;
}
bool cubic_support = false;
if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) {
m_device_extension_names.push_back("VK_IMG_filter_cubic");
cubic_support = true;
}
VkSamplerCreateInfo sampler_info_ref = SafeSaneSamplerCreateInfo();
sampler_info_ref.anisotropyEnable = VK_TRUE;
VkSamplerCreateInfo sampler_info = sampler_info_ref;
ASSERT_NO_FATAL_FAILURE(InitState());
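// Helper: create a sampler from pCreateInfo, expect the given error code, and clean up if creation unexpectedly succeeds.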
auto do_test = [this](std::string code, const VkSamplerCreateInfo *pCreateInfo) -> void {
VkResult err;
VkSampler sampler = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, code);
err = vkCreateSampler(m_device->device(), pCreateInfo, NULL, &sampler);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == err) {
vkDestroySampler(m_device->device(), sampler, NULL);
}
};
// maxAnisotropy out-of-bounds low.
sampler_info.maxAnisotropy = NearestSmaller(1.0F);
do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info);
sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;
// maxAnisotropy out-of-bounds high.
sampler_info.maxAnisotropy = NearestGreater(m_device->phy().properties().limits.maxSamplerAnisotropy);
do_test("VUID-VkSamplerCreateInfo-anisotropyEnable-01071", &sampler_info);
sampler_info.maxAnisotropy = sampler_info_ref.maxAnisotropy;
// Both anisotropy and unnormalized coords enabled
sampler_info.unnormalizedCoordinates = VK_TRUE;
do_test("VUID-VkSamplerCreateInfo-unnormalizedCoordinates-01076", &sampler_info);
sampler_info.unnormalizedCoordinates = sampler_info_ref.unnormalizedCoordinates;
// Both anisotropy and cubic filtering enabled
if (cubic_support) {
sampler_info.minFilter = VK_FILTER_CUBIC_IMG;
do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info);
sampler_info.minFilter = sampler_info_ref.minFilter;
sampler_info.magFilter = VK_FILTER_CUBIC_IMG;
do_test("VUID-VkSamplerCreateInfo-magFilter-01081", &sampler_info);
sampler_info.magFilter = sampler_info_ref.magFilter;
} else {
printf("%s Test requires unsupported extension \"VK_IMG_filter_cubic\". Skipped.\n", kSkipPrefix);
}
}
TEST_F(VkLayerTest, UnrecognizedValueMaxEnum) {
ASSERT_NO_FATAL_FAILURE(Init());
// Specify MAX_ENUM
VkFormatProperties format_properties;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not fall within the begin..end range");
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_MAX_ENUM, &format_properties);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UpdateBufferAlignment) {
TEST_DESCRIPTION("Check alignment parameters for vkCmdUpdateBuffer");
uint32_t updateData[] = {1, 2, 3, 4, 5, 6, 7, 8};
ASSERT_NO_FATAL_FAILURE(Init());
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
VkBufferObj buffer;
buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);
m_commandBuffer->begin();
// Introduce failure by using dstOffset that is not multiple of 4
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
m_commandBuffer->UpdateBuffer(buffer.handle(), 1, 4, updateData);
m_errorMonitor->VerifyFound();
// Introduce failure by using dataSize that is not multiple of 4
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
m_commandBuffer->UpdateBuffer(buffer.handle(), 0, 6, updateData);
m_errorMonitor->VerifyFound();
// Introduce failure with a negative dataSize; the cast to unsigned VkDeviceSize wraps to a value far above 65536
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"must be greater than zero and less than or equal to 65536");
m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)-44, updateData);
m_errorMonitor->VerifyFound();
// Introduce failure by using dataSize that is > 65536
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"must be greater than zero and less than or equal to 65536");
m_commandBuffer->UpdateBuffer(buffer.handle(), 0, (VkDeviceSize)80000, updateData);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, FillBufferAlignment) {
TEST_DESCRIPTION("Check alignment parameters for vkCmdFillBuffer");
ASSERT_NO_FATAL_FAILURE(Init());
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
VkBufferObj buffer;
buffer.init_as_dst(*m_device, (VkDeviceSize)20, reqs);
m_commandBuffer->begin();
// Introduce failure by using dstOffset that is not multiple of 4
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
m_commandBuffer->FillBuffer(buffer.handle(), 1, 4, 0x11111111);
m_errorMonitor->VerifyFound();
// Introduce failure by using size that is not multiple of 4
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is not a multiple of 4");
m_commandBuffer->FillBuffer(buffer.handle(), 0, 6, 0x11111111);
m_errorMonitor->VerifyFound();
// Introduce failure by using size that is zero
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "must be greater than zero");
m_commandBuffer->FillBuffer(buffer.handle(), 0, 0, 0x11111111);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, PSOPolygonModeInvalid) {
TEST_DESCRIPTION("Attempt to use a non-solid polygon fill mode in a pipeline when this feature is not enabled.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Artificially disable support for non-solid fill modes
features.fillModeNonSolid = VK_FALSE;
// The sacrificial device object
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkRenderpassObj render_pass(&test_device);
const VkPipelineLayoutObj pipeline_layout(&test_device);
VkPipelineRasterizationStateCreateInfo rs_ci = {};
rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_ci.pNext = nullptr;
rs_ci.lineWidth = 1.0f;
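// Rasterizer discard is enabled; the non-solid polygonMode check is still applied at pipeline creation time.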
rs_ci.rasterizerDiscardEnable = VK_TRUE;
VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
// Set polygonMode to unsupported value POINT, should fail
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE");
{
VkPipelineObj pipe(&test_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
// Introduce failure by setting unsupported polygon mode
rs_ci.polygonMode = VK_POLYGON_MODE_POINT;
pipe.SetRasterization(&rs_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
}
m_errorMonitor->VerifyFound();
// Try again with polygonMode=LINE, should fail
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"polygonMode cannot be VK_POLYGON_MODE_POINT or VK_POLYGON_MODE_LINE");
{
VkPipelineObj pipe(&test_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
// Introduce failure by setting unsupported polygon mode
rs_ci.polygonMode = VK_POLYGON_MODE_LINE;
pipe.SetRasterization(&rs_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
}
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SparseBindingImageBufferCreate) {
TEST_DESCRIPTION("Create buffer/image with sparse attributes but without the sparse_binding bit set");
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
buf_info.size = 2048;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
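// The residency/aliased flags used below also require VK_BUFFER_CREATE_SPARSE_BINDING_BIT, which is deliberately left
// unset to provoke VUID-VkBufferCreateInfo-flags-00918.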
if (m_device->phy().features().sparseResidencyBuffer) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918");
buf_info.flags = VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT;
vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
m_errorMonitor->VerifyFound();
} else {
printf("%s Test requires unsupported sparseResidencyBuffer feature. Skipped.\n", kSkipPrefix);
return;
}
if (m_device->phy().features().sparseResidencyAliased) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-flags-00918");
buf_info.flags = VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
m_errorMonitor->VerifyFound();
} else {
printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix);
return;
}
VkImage image;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 512;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
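// Likewise for images: residency/aliased without VK_IMAGE_CREATE_SPARSE_BINDING_BIT should trigger
// VUID-VkImageCreateInfo-flags-00987.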
if (m_device->phy().features().sparseResidencyImage2D) {
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987");
vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
} else {
printf("%s Test requires unsupported sparseResidencyImage2D feature. Skipped.\n", kSkipPrefix);
return;
}
if (m_device->phy().features().sparseResidencyAliased) {
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00987");
vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
} else {
printf("%s Test requires unsupported sparseResidencyAliased feature. Skipped.\n", kSkipPrefix);
return;
}
}
TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedTypes) {
TEST_DESCRIPTION("Create images with sparse residency with unsupported types");
// Determine which device features are available
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
// Mask out device features we don't want and initialize device state
device_features.sparseResidencyImage2D = VK_FALSE;
device_features.sparseResidencyImage3D = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
if (!m_device->phy().features().sparseBinding) {
printf("%s Test requires unsupported sparseBinding feature. Skipped.\n", kSkipPrefix);
return;
}
VkImage image = VK_NULL_HANDLE;
VkResult result = VK_RESULT_MAX_ENUM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_1D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 512;
image_create_info.extent.height = 1;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
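// Sparse residency requires the sparse binding flag, so both are set; the errors below target the unsupported image types.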
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
// 1D image w/ sparse residency is an error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00970");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
// 2D image w/ sparse residency when feature isn't available
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.extent.height = 64;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00971");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
// 3D image w/ sparse residency when feature isn't available
image_create_info.imageType = VK_IMAGE_TYPE_3D;
image_create_info.extent.depth = 8;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00972");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
}
TEST_F(VkLayerTest, SparseResidencyImageCreateUnsupportedSamples) {
TEST_DESCRIPTION("Create images with sparse residency with unsupported tiling or sample counts");
// Determine which device features are available
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
// These tests require that the device support sparse residency for 2D images
if (VK_TRUE != device_features.sparseResidencyImage2D) {
printf("%s Test requires unsupported SparseResidencyImage2D feature. Skipped.\n", kSkipPrefix);
return;
}
// Mask out device features we don't want and initialize device state
device_features.sparseResidency2Samples = VK_FALSE;
device_features.sparseResidency4Samples = VK_FALSE;
device_features.sparseResidency8Samples = VK_FALSE;
device_features.sparseResidency16Samples = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
VkImage image = VK_NULL_HANDLE;
VkResult result = VK_RESULT_MAX_ENUM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
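// Both sparse flags are set; the failures below come from linear tiling and from the disabled multi-sample residency features.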
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
// 2D image w/ sparse residency and linear tiling is an error
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT then image tiling of VK_IMAGE_TILING_LINEAR is not supported");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Multi-sample image w/ sparse residency when feature isn't available (4 flavors)
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00973");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00974");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
image_create_info.samples = VK_SAMPLE_COUNT_8_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00975");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
image_create_info.samples = VK_SAMPLE_COUNT_16_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00976");
result = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == result) {
vkDestroyImage(m_device->device(), image, NULL);
image = VK_NULL_HANDLE;
}
}
TEST_F(VkLayerTest, InvalidMemoryAliasing) {
TEST_DESCRIPTION(
"Create a buffer and image, allocate memory, and bind the buffer and image to memory such that they will alias.");
VkResult err;
bool pass;
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer, buffer2;
VkImage image;
VkImage image2;
VkDeviceMemory mem; // buffer will be bound first
VkDeviceMemory mem_img; // image bound first
VkMemoryRequirements buff_mem_reqs, img_mem_reqs;
VkMemoryRequirements buff_mem_reqs2, img_mem_reqs2;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = 256;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buf_info.flags = 0;
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &buff_mem_reqs);
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
// Image tiling must be optimal to trigger error when aliasing linear buffer
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &img_mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
// Ensure memory is big enough for both bindings
alloc_info.allocationSize = buff_mem_reqs.size + img_mem_reqs.size;
pass = m_device->phy().set_memory_type(buff_mem_reqs.memoryTypeBits & img_mem_reqs.memoryTypeBits, &alloc_info,
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyImage(m_device->device(), image2, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image2, &img_mem_reqs2);
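// Binding the image at offset 0 of the same allocation overlaps the buffer bound above.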
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " is aliased with linear buffer 0x");
// VALIDATION FAILURE due to image binding overlapping the buffer binding
err = vkBindImageMemory(m_device->device(), image, mem, 0);
m_errorMonitor->VerifyFound();
// Now correctly bind image2 to second mem allocation before incorrectly
// aliasing buffer2
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer2);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem_img);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image2, mem_img, 0);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "is aliased with non-linear image 0x");
vkGetBufferMemoryRequirements(m_device->device(), buffer2, &buff_mem_reqs2);
err = vkBindBufferMemory(m_device->device(), buffer2, mem_img, 0);
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkDestroyBuffer(m_device->device(), buffer2, NULL);
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyImage(m_device->device(), image2, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
vkFreeMemory(m_device->device(), mem_img, NULL);
}
TEST_F(VkLayerTest, InvalidMemoryMapping) {
TEST_DESCRIPTION("Attempt to map memory in a number of incorrect ways");
VkResult err;
bool pass;
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = 256;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buf_info.flags = 0;
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
// Allocate enough memory to exercise the mapping and flush-range checks below
static const VkDeviceSize allocation_size = 0x10000;
alloc_info.allocationSize = allocation_size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
uint8_t *pData;
// Attempt to map memory size 0 is invalid
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VkMapMemory: Attempting to map memory range of size zero");
err = vkMapMemory(m_device->device(), mem, 0, 0, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Map memory twice
err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VkMapMemory: Attempting to map memory on an already-mapped object ");
err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Unmap the memory to avoid re-map error
vkUnmapMemory(m_device->device(), mem);
// overstep allocation with VK_WHOLE_SIZE
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" with size of VK_WHOLE_SIZE oversteps total array size 0x");
err = vkMapMemory(m_device->device(), mem, allocation_size + 1, VK_WHOLE_SIZE, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// overstep allocation w/o VK_WHOLE_SIZE
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " oversteps total array size 0x");
err = vkMapMemory(m_device->device(), mem, 1, allocation_size, 0, (void **)&pData);
m_errorMonitor->VerifyFound();
// Now error due to unmapping memory that's not mapped
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Unmapping Memory without memory being mapped: ");
vkUnmapMemory(m_device->device(), mem);
m_errorMonitor->VerifyFound();
// Now map memory and cause errors due to flushing invalid ranges
err = vkMapMemory(m_device->device(), mem, 4 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
VkMappedMemoryRange mmr = {};
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = atom_size; // Error b/c offset less than offset of mapped mem
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685");
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now flush range that oversteps mapped range
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = 4 * atom_size; // Flushing bounds exceed mapped bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00685");
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now flush range with VK_WHOLE_SIZE that oversteps offset
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 2 * atom_size, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-00686");
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Some platforms have an atom size of 1, which makes these alignment checks meaningless
if (atom_size > 3) {
// Now with an offset NOT a multiple of the device limit
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = 3; // Not a multiple of atom_size
mmr.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-offset-00687");
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
// Now with a size NOT a multiple of the device limit
vkUnmapMemory(m_device->device(), mem);
err = vkMapMemory(m_device->device(), mem, 0, 4 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.offset = atom_size;
mmr.size = 2 * atom_size + 1; // Not a multiple of atom_size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkMappedMemoryRange-size-01390");
vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
m_errorMonitor->VerifyFound();
}
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
// TODO : If we can get HOST_VISIBLE w/o HOST_COHERENT we can test cases of
// MEMTRACK_INVALID_MAP in validateAndCopyNoncoherentMemoryToDriver()
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, MapMemWithoutHostVisibleBit) {
TEST_DESCRIPTION("Allocate memory that is not mappable and then attempt to map it.");
VkResult err;
bool pass;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMapMemory-memory-00682");
ASSERT_NO_FATAL_FAILURE(Init());
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 1024;
pass = m_device->phy().set_memory_type(0xFFFFFFFF, &mem_alloc, 0, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) { // If we can't find any unmappable memory this test doesn't
// make sense
printf("%s No unmappable memory types found, skipping test\n", kSkipPrefix);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
void *mappedAddress = NULL;
err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, &mappedAddress);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, RebindMemory) {
VkResult err;
bool pass;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which has already been bound to mem object");
ASSERT_NO_FATAL_FAILURE(Init());
// Create an image, bind it to one memory object, then try to bind a second memory object to the same image
VkImage image;
VkDeviceMemory mem1;
VkDeviceMemory mem2;
VkMemoryRequirements mem_reqs;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
// memoryTypeIndex is chosen below via set_memory_type(); the failure this test
// introduces is binding a second memory object to an already-bound image.
mem_alloc.memoryTypeIndex = 1;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
// allocate 2 memory objects
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem1);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem2);
ASSERT_VK_SUCCESS(err);
// Bind first memory object to Image object
err = vkBindImageMemory(m_device->device(), image, mem1, 0);
ASSERT_VK_SUCCESS(err);
// Introduce validation failure, try to bind a different memory object to
// the same image object
err = vkBindImageMemory(m_device->device(), image, mem2, 0);
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image, NULL);
vkFreeMemory(m_device->device(), mem1, NULL);
vkFreeMemory(m_device->device(), mem2, NULL);
}
TEST_F(VkLayerTest, SubmitSignaledFence) {
vk_testing::Fence testFence;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"submitted in SIGNALED state. Fences must be reset before being submitted");
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.pNext = NULL;
fenceInfo.flags = VK_FENCE_CREATE_SIGNALED_BIT;
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
m_commandBuffer->end();
testFence.init(*m_device, fenceInfo);
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, testFence.handle());
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidUsageBits) {
TEST_DESCRIPTION(
"Specify wrong usage for image then create conflicting view of image Initialize buffer with wrong usage then perform copy "
"expecting errors from both the image and the buffer (2 calls)");
ASSERT_NO_FATAL_FAILURE(Init());
auto format = FindSupportedDepthStencilFormat(gpu());
if (!format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkImageObj image(m_device);
// Initialize image with transfer source usage
image.Init(128, 128, 1, format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView dsv;
VkImageViewCreateInfo dsvci = {};
dsvci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
dsvci.image = image.handle();
dsvci.viewType = VK_IMAGE_VIEW_TYPE_2D;
dsvci.format = format;
dsvci.subresourceRange.layerCount = 1;
dsvci.subresourceRange.baseMipLevel = 0;
dsvci.subresourceRange.levelCount = 1;
dsvci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
// Create a view with depth / stencil aspect for image with different usage
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid usage flag for Image ");
vkCreateImageView(m_device->device(), &dsvci, NULL, &dsv);
m_errorMonitor->VerifyFound();
// Initialize buffer with TRANSFER_DST usage
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_dst(*m_device, 128 * 128, reqs);
VkBufferImageCopy region = {};
region.bufferRowLength = 128;
region.bufferImageHeight = 128;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.height = 16;
region.imageExtent.width = 16;
region.imageExtent.depth = 1;
// Buffer usage not set to TRANSFER_SRC and image usage not set to TRANSFER_DST
m_commandBuffer->begin();
// two separate errors from this call:
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImage-00177");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-srcBuffer-00174");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, LeakAnObject) {
VkResult err;
TEST_DESCRIPTION("Create a fence and destroy its device without first destroying the fence.");
// Note that we have to create a new device since destroying the
// framework's device causes Teardown() to fail and just calling Teardown
// will destroy the errorMonitor.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has not been destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
// The sacrificial device object
VkDevice testDevice;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
err = vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice);
ASSERT_VK_SUCCESS(err);
VkFence fence;
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_create_info.pNext = NULL;
fence_create_info.flags = 0;
err = vkCreateFence(testDevice, &fence_create_info, NULL, &fence);
ASSERT_VK_SUCCESS(err);
// Induce failure by not calling vkDestroyFence
vkDestroyDevice(testDevice, NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCommandPoolConsistency) {
TEST_DESCRIPTION("Allocate command buffers from one command pool and attempt to delete them from another.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeCommandBuffers is attempting to free Command Buffer");
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandPool command_pool_one;
VkCommandPool command_pool_two;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_one);
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool_two);
VkCommandBuffer cb;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool_one;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &cb);
vkFreeCommandBuffers(m_device->device(), command_pool_two, 1, &cb);
m_errorMonitor->VerifyFound();
vkDestroyCommandPool(m_device->device(), command_pool_one, NULL);
vkDestroyCommandPool(m_device->device(), command_pool_two, NULL);
}
TEST_F(VkLayerTest, InvalidDescriptorPoolConsistency) {
VkResult err;
TEST_DESCRIPTION("Allocate descriptor sets from one DS pool and attempt to delete them from another.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "FreeDescriptorSets is attempting to free descriptorSet");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.flags = 0;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool bad_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &bad_pool);
ASSERT_VK_SUCCESS(err);
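// OneOffDescriptorSet allocates its descriptor set from its own internal pool; freeing it from bad_pool below is the
// inconsistency under test.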
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
err = vkFreeDescriptorSets(m_device->device(), bad_pool, 1, &ds.set_);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorPool(m_device->device(), bad_pool, NULL);
}
TEST_F(VkLayerTest, CreateUnknownObject) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageMemoryRequirements-image-parameter");
TEST_DESCRIPTION("Pass an invalid image object handle into a Vulkan API call.");
ASSERT_NO_FATAL_FAILURE(Init());
// Pass bogus handle into GetImageMemoryRequirements
VkMemoryRequirements mem_reqs;
uint64_t fakeImageHandle = 0xCADECADE;
VkImage fauxImage = reinterpret_cast<VkImage &>(fakeImageHandle);
vkGetImageMemoryRequirements(m_device->device(), fauxImage, &mem_reqs);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, UseObjectWithWrongDevice) {
TEST_DESCRIPTION(
"Try to destroy a render pass object using a device other than the one it was created on. This should generate a distinct "
"error from the invalid handle error.");
// Create first device and renderpass
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create second device
float priorities[] = {1.0f};
VkDeviceQueueCreateInfo queue_info{};
queue_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_info.pNext = NULL;
queue_info.flags = 0;
queue_info.queueFamilyIndex = 0;
queue_info.queueCount = 1;
queue_info.pQueuePriorities = &priorities[0];
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pQueueCreateInfos = &queue_info;
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkDevice second_device;
ASSERT_VK_SUCCESS(vkCreateDevice(gpu(), &device_create_info, NULL, &second_device));
// Try to destroy the renderpass from the first device using the second device
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-parent");
vkDestroyRenderPass(second_device, m_renderPass, NULL);
m_errorMonitor->VerifyFound();
vkDestroyDevice(second_device, NULL);
}
TEST_F(VkLayerTest, PipelineNotBound) {
TEST_DESCRIPTION("Pass in an invalid pipeline object handle into a Vulkan API call.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipeline badPipeline = (VkPipeline)((size_t)0xbaadb1be);
m_commandBuffer->begin();
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, badPipeline);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, BindImageInvalidMemoryType) {
VkResult err;
TEST_DESCRIPTION("Test validation check for an invalid memory type index during bind[Buffer|Image]Memory time");
ASSERT_NO_FATAL_FAILURE(Init());
// Create an image, allocate memory, set a bad typeIndex and then try to
// bind it
VkImage image;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
// Introduce Failure, select invalid TypeIndex
VkPhysicalDeviceMemoryProperties memory_info;
vkGetPhysicalDeviceMemoryProperties(gpu(), &memory_info);
unsigned int i;
for (i = 0; i < memory_info.memoryTypeCount; i++) {
if ((mem_reqs.memoryTypeBits & (1 << i)) == 0) {
mem_alloc.memoryTypeIndex = i;
break;
}
}
if (i >= memory_info.memoryTypeCount) {
printf("%s No invalid memory type index could be found; skipped.\n", kSkipPrefix);
vkDestroyImage(m_device->device(), image, NULL);
return;
}
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "for this object type are not compatible with the memory");
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, mem, 0);
(void)err;
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, BindInvalidMemory) {
VkResult err;
bool pass;
ASSERT_NO_FATAL_FAILURE(Init());
const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
const int32_t tex_width = 256;
const int32_t tex_height = 256;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = NULL;
buffer_create_info.flags = 0;
buffer_create_info.size = 4 * 1024 * 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
buffer_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
// Create an image/buffer, allocate memory, free it, and then try to bind it
{
VkImage image = VK_NULL_HANDLE;
VkBuffer buffer = VK_NULL_HANDLE;
err = vkCreateImage(device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {};
vkGetImageMemoryRequirements(device(), image, &image_mem_reqs);
vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs);
VkMemoryAllocateInfo image_mem_alloc = {}, buffer_mem_alloc = {};
image_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
image_mem_alloc.allocationSize = image_mem_reqs.size;
pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_mem_alloc, 0);
ASSERT_TRUE(pass);
buffer_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
buffer_mem_alloc.allocationSize = buffer_mem_reqs.size;
pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_mem_alloc, 0);
ASSERT_TRUE(pass);
VkDeviceMemory image_mem = VK_NULL_HANDLE, buffer_mem = VK_NULL_HANDLE;
err = vkAllocateMemory(device(), &image_mem_alloc, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(device(), &buffer_mem_alloc, NULL, &buffer_mem);
ASSERT_VK_SUCCESS(err);
vkFreeMemory(device(), image_mem, NULL);
vkFreeMemory(device(), buffer_mem, NULL);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-parameter");
err = vkBindImageMemory(device(), image, image_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-parameter");
err = vkBindBufferMemory(device(), buffer, buffer_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
}
// Try to bind memory to an object that already has a memory binding
{
VkImage image = VK_NULL_HANDLE;
err = vkCreateImage(device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkBuffer buffer = VK_NULL_HANDLE;
err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {};
vkGetImageMemoryRequirements(device(), image, &image_mem_reqs);
vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs);
VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {};
image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
image_alloc_info.allocationSize = image_mem_reqs.size;
buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
buffer_alloc_info.allocationSize = buffer_mem_reqs.size;
pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0);
ASSERT_TRUE(pass);
pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0);
ASSERT_TRUE(pass);
VkDeviceMemory image_mem, buffer_mem;
err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(device(), image, image_mem, 0);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01044");
err = vkBindImageMemory(device(), image, image_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
err = vkBindBufferMemory(device(), buffer, buffer_mem, 0);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01029");
err = vkBindBufferMemory(device(), buffer, buffer_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
vkFreeMemory(device(), image_mem, NULL);
vkFreeMemory(device(), buffer_mem, NULL);
vkDestroyImage(device(), image, NULL);
vkDestroyBuffer(device(), buffer, NULL);
}
// Try to bind memory to an object with an invalid memoryOffset
{
VkImage image = VK_NULL_HANDLE;
err = vkCreateImage(device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkBuffer buffer = VK_NULL_HANDLE;
err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {};
vkGetImageMemoryRequirements(device(), image, &image_mem_reqs);
vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs);
VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {};
image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
// Leave some extra space for alignment wiggle room
image_alloc_info.allocationSize = image_mem_reqs.size + image_mem_reqs.alignment;
buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
buffer_alloc_info.allocationSize = buffer_mem_reqs.size + buffer_mem_reqs.alignment;
pass = m_device->phy().set_memory_type(image_mem_reqs.memoryTypeBits, &image_alloc_info, 0);
ASSERT_TRUE(pass);
pass = m_device->phy().set_memory_type(buffer_mem_reqs.memoryTypeBits, &buffer_alloc_info, 0);
ASSERT_TRUE(pass);
VkDeviceMemory image_mem, buffer_mem;
err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem);
ASSERT_VK_SUCCESS(err);
// Test unaligned memory offset
{
if (image_mem_reqs.alignment > 1) {
VkDeviceSize image_offset = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01048");
err = vkBindImageMemory(device(), image, image_mem, image_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
}
if (buffer_mem_reqs.alignment > 1) {
VkDeviceSize buffer_offset = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01036");
err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
}
}
// Test memory offsets outside the memory allocation
{
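// Compute an aligned offset that lands past the end of the allocation.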
VkDeviceSize image_offset =
(image_alloc_info.allocationSize + image_mem_reqs.alignment) & ~(image_mem_reqs.alignment - 1);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memoryOffset-01046");
err = vkBindImageMemory(device(), image, image_mem, image_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
VkDeviceSize buffer_offset =
(buffer_alloc_info.allocationSize + buffer_mem_reqs.alignment) & ~(buffer_mem_reqs.alignment - 1);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memoryOffset-01031");
err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
}
// Test memory offsets within the memory allocation, but which leave too little memory for
// the resource.
{
VkDeviceSize image_offset = (image_mem_reqs.size - 1) & ~(image_mem_reqs.alignment - 1);
if ((image_offset > 0) && (image_mem_reqs.size < (image_alloc_info.allocationSize - image_mem_reqs.alignment))) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-size-01049");
err = vkBindImageMemory(device(), image, image_mem, image_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
}
VkDeviceSize buffer_offset = (buffer_mem_reqs.size - 1) & ~(buffer_mem_reqs.alignment - 1);
if (buffer_offset > 0) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-size-01037");
err = vkBindBufferMemory(device(), buffer, buffer_mem, buffer_offset);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
}
}
vkFreeMemory(device(), image_mem, NULL);
vkFreeMemory(device(), buffer_mem, NULL);
vkDestroyImage(device(), image, NULL);
vkDestroyBuffer(device(), buffer, NULL);
}
// Try to bind memory to an object with an invalid memory type
{
VkImage image = VK_NULL_HANDLE;
err = vkCreateImage(device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkBuffer buffer = VK_NULL_HANDLE;
err = vkCreateBuffer(device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements image_mem_reqs = {}, buffer_mem_reqs = {};
vkGetImageMemoryRequirements(device(), image, &image_mem_reqs);
vkGetBufferMemoryRequirements(device(), buffer, &buffer_mem_reqs);
VkMemoryAllocateInfo image_alloc_info = {}, buffer_alloc_info = {};
image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
image_alloc_info.allocationSize = image_mem_reqs.size;
buffer_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
buffer_alloc_info.allocationSize = buffer_mem_reqs.size;
// Create a mask of available memory types *not* supported by these resources,
// and try to use one of them.
VkPhysicalDeviceMemoryProperties memory_properties = {};
vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memory_properties);
VkDeviceMemory image_mem, buffer_mem;
uint32_t image_unsupported_mem_type_bits = ((1 << memory_properties.memoryTypeCount) - 1) & ~image_mem_reqs.memoryTypeBits;
if (image_unsupported_mem_type_bits != 0) {
pass = m_device->phy().set_memory_type(image_unsupported_mem_type_bits, &image_alloc_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(device(), &image_alloc_info, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01047");
err = vkBindImageMemory(device(), image, image_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
vkFreeMemory(device(), image_mem, NULL);
}
uint32_t buffer_unsupported_mem_type_bits =
((1 << memory_properties.memoryTypeCount) - 1) & ~buffer_mem_reqs.memoryTypeBits;
if (buffer_unsupported_mem_type_bits != 0) {
pass = m_device->phy().set_memory_type(buffer_unsupported_mem_type_bits, &buffer_alloc_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(device(), &buffer_alloc_info, NULL, &buffer_mem);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01035");
err = vkBindBufferMemory(device(), buffer, buffer_mem, 0);
(void)err; // This may very well return an error.
m_errorMonitor->VerifyFound();
vkFreeMemory(device(), buffer_mem, NULL);
}
vkDestroyImage(device(), image, NULL);
vkDestroyBuffer(device(), buffer, NULL);
}
// Try to bind memory to an image created with sparse memory flags
{
VkImageCreateInfo sparse_image_create_info = image_create_info;
sparse_image_create_info.flags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
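// Memory for an image created with a sparse binding flag must be bound via vkQueueBindSparse, so a
// plain vkBindImageMemory call on this image is expected to trigger VUID-vkBindImageMemory-image-01045.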
VkImageFormatProperties image_format_properties = {};
err = vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), sparse_image_create_info.format,
sparse_image_create_info.imageType, sparse_image_create_info.tiling,
sparse_image_create_info.usage, sparse_image_create_info.flags,
&image_format_properties);
if (!m_device->phy().features().sparseResidencyImage2D || err == VK_ERROR_FORMAT_NOT_SUPPORTED) {
// most likely means sparse formats aren't supported here; skip this test.
} else {
ASSERT_VK_SUCCESS(err);
if (image_format_properties.maxExtent.width == 0) {
printf("%s Sparse image format not supported; skipped.\n", kSkipPrefix);
return;
} else {
VkImage sparse_image = VK_NULL_HANDLE;
err = vkCreateImage(m_device->device(), &sparse_image_create_info, NULL, &sparse_image);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements sparse_mem_reqs = {};
vkGetImageMemoryRequirements(m_device->device(), sparse_image, &sparse_mem_reqs);
if (sparse_mem_reqs.memoryTypeBits != 0) {
VkMemoryAllocateInfo sparse_mem_alloc = {};
sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
sparse_mem_alloc.pNext = NULL;
sparse_mem_alloc.allocationSize = sparse_mem_reqs.size;
sparse_mem_alloc.memoryTypeIndex = 0;
pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0);
ASSERT_TRUE(pass);
VkDeviceMemory sparse_mem = VK_NULL_HANDLE;
err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-01045");
err = vkBindImageMemory(m_device->device(), sparse_image, sparse_mem, 0);
// This may very well return an error.
(void)err;
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), sparse_mem, NULL);
}
vkDestroyImage(m_device->device(), sparse_image, NULL);
}
}
}
// Try to bind memory to a buffer created with sparse memory flags
{
VkBufferCreateInfo sparse_buffer_create_info = buffer_create_info;
sparse_buffer_create_info.flags |= VK_BUFFER_CREATE_SPARSE_BINDING_BIT;
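// As with sparse images, memory for a sparse-binding buffer must be bound via vkQueueBindSparse, so a
// direct vkBindBufferMemory call should trigger VUID-vkBindBufferMemory-buffer-01030.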
if (!m_device->phy().features().sparseResidencyBuffer) {
// most likely means sparse buffers aren't supported here; skip this test.
} else {
VkBuffer sparse_buffer = VK_NULL_HANDLE;
err = vkCreateBuffer(m_device->device(), &sparse_buffer_create_info, NULL, &sparse_buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements sparse_mem_reqs = {};
vkGetBufferMemoryRequirements(m_device->device(), sparse_buffer, &sparse_mem_reqs);
if (sparse_mem_reqs.memoryTypeBits != 0) {
VkMemoryAllocateInfo sparse_mem_alloc = {};
sparse_mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
sparse_mem_alloc.pNext = NULL;
sparse_mem_alloc.allocationSize = sparse_mem_reqs.size;
sparse_mem_alloc.memoryTypeIndex = 0;
pass = m_device->phy().set_memory_type(sparse_mem_reqs.memoryTypeBits, &sparse_mem_alloc, 0);
ASSERT_TRUE(pass);
VkDeviceMemory sparse_mem = VK_NULL_HANDLE;
err = vkAllocateMemory(m_device->device(), &sparse_mem_alloc, NULL, &sparse_mem);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-01030");
err = vkBindBufferMemory(m_device->device(), sparse_buffer, sparse_mem, 0);
// This may very well return an error.
(void)err;
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), sparse_mem, NULL);
}
vkDestroyBuffer(m_device->device(), sparse_buffer, NULL);
}
}
}
TEST_F(VkLayerTest, BindMemoryToDestroyedObject) {
VkResult err;
bool pass;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-image-parameter");
ASSERT_NO_FATAL_FAILURE(Init());
// Create an image object, allocate memory, destroy the image, and then try
// to bind the memory to it
VkImage image;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
// Allocate memory
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
// Introduce validation failure, destroy Image object before binding
vkDestroyImage(m_device->device(), image, NULL);
// Now Try to bind memory to this destroyed object
err = vkBindImageMemory(m_device->device(), image, mem, 0);
// This may very well return an error.
(void)err;
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, ExceedMemoryAllocationCount) {
VkResult err = VK_SUCCESS;
const int max_mems = 32;
VkDeviceMemory mems[max_mems + 1];
if (!EnableDeviceProfileLayer()) {
printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT =
(PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT");
PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT =
(PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT");
if (!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
return;
}
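// Use the device_profile_api layer to clamp maxMemoryAllocationCount to a small value so the limit
// can be exceeded quickly in the loop below.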
VkPhysicalDeviceProperties props;
fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
if (props.limits.maxMemoryAllocationCount > max_mems) {
props.limits.maxMemoryAllocationCount = max_mems;
fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Number of currently valid memory objects is not less than the maximum allowed");
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.memoryTypeIndex = 0;
mem_alloc.allocationSize = 4;
int i;
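// Allocate until the clamped limit is exceeded; the expected error fires on the allocation past the
// limit, and 'i' records how many allocations succeeded so they can be freed afterwards.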
for (i = 0; i <= max_mems; i++) {
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mems[i]);
if (err != VK_SUCCESS) {
break;
}
}
m_errorMonitor->VerifyFound();
for (int j = 0; j < i; j++) {
vkFreeMemory(m_device->device(), mems[j], NULL);
}
}
TEST_F(VkLayerTest, CreatePipelineBadVertexAttributeFormat) {
TEST_DESCRIPTION("Test that pipeline validation catches invalid vertex attribute formats");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attribs;
memset(&input_attribs, 0, sizeof(input_attribs));
// Pick a format that should never be valid as a vertex attribute and verify this device agrees
input_attribs.format = VK_FORMAT_BC2_UNORM_BLOCK;
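// BC2 is a block-compressed format, which no implementation is expected to expose as a vertex buffer
// format; the check below skips the test if this device somehow does.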
VkFormatProperties format_props = m_device->format_properties(input_attribs.format);
if ((format_props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0) {
printf("%s Format unsuitable for test; skipped.\n", kSkipPrefix);
return;
}
input_attribs.location = 0;
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-format-00623");
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(&input_attribs, 1);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ImageSampleCounts) {
TEST_DESCRIPTION("Use bad sample counts in image transfer calls to trigger validation errors.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
VkMemoryPropertyFlags reqs = 0;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 256;
image_create_info.extent.height = 256;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.flags = 0;
VkImageBlit blit_region = {};
blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.srcSubresource.baseArrayLayer = 0;
blit_region.srcSubresource.layerCount = 1;
blit_region.srcSubresource.mipLevel = 0;
blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.dstSubresource.baseArrayLayer = 0;
blit_region.dstSubresource.layerCount = 1;
blit_region.dstSubresource.mipLevel = 0;
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.srcOffsets[1] = {256, 256, 1};
blit_region.dstOffsets[0] = {0, 0, 0};
blit_region.dstOffsets[1] = {128, 128, 1};
// Create two images, the source with sampleCount = 4, and attempt to blit
// between them
{
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
VkImageObj src_image(m_device);
src_image.init(&image_create_info);
src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageObj dst_image(m_device);
dst_image.init(&image_create_info);
dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
m_commandBuffer->begin();
// TODO: These 2 VUs are redundant - expect one of them to go away
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228");
vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
// Create two images, the dest with sampleCount = 4, and attempt to blit
// between them
{
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
VkImageObj src_image(m_device);
src_image.init(&image_create_info);
src_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageObj dst_image(m_device);
dst_image.init(&image_create_info);
dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
m_commandBuffer->begin();
// TODO: These 2 VUs are redundant - expect one of them to go away
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228");
vkCmdBlitImage(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst_image.handle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
VkBufferImageCopy copy_region = {};
copy_region.bufferRowLength = 128;
copy_region.bufferImageHeight = 128;
copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.imageSubresource.layerCount = 1;
copy_region.imageExtent.height = 64;
copy_region.imageExtent.width = 64;
copy_region.imageExtent.depth = 1;
// Create src buffer and dst image with sampleCount = 4 and attempt to copy
// buffer to image
{
VkBufferObj src_buffer;
src_buffer.init_as_src(*m_device, 128 * 128 * 4, reqs);
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageObj dst_image(m_device);
dst_image.init(&image_create_info);
dst_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), src_buffer.handle(), dst_image.handle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
// Create dst buffer and src image with sampleCount = 4 and attempt to copy
// image to buffer
{
VkBufferObj dst_buffer;
dst_buffer.init_as_dst(*m_device, 128 * 128 * 4, reqs);
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
vk_testing::Image src_image;
src_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"was created with a sample count of VK_SAMPLE_COUNT_4_BIT but must be VK_SAMPLE_COUNT_1_BIT");
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), src_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
dst_buffer.handle(), 1, &copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
}
TEST_F(VkLayerTest, BlitImageFormatTypes) {
ASSERT_NO_FATAL_FAILURE(Init());
VkFormat f_unsigned = VK_FORMAT_R8G8B8A8_UINT;
VkFormat f_signed = VK_FORMAT_R8G8B8A8_SINT;
VkFormat f_float = VK_FORMAT_R32_SFLOAT;
VkFormat f_depth = VK_FORMAT_D32_SFLOAT_S8_UINT;
VkFormat f_depth2 = VK_FORMAT_D32_SFLOAT;
if (!ImageFormatIsSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL) ||
!ImageFormatIsSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL) ||
!ImageFormatIsSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL) ||
!ImageFormatIsSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL) ||
!ImageFormatIsSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL)) {
printf("%s Requested formats not supported - BlitImageFormatTypes skipped.\n", kSkipPrefix);
return;
}
// Note any missing feature bits
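// Each 'missing' flag is true when the corresponding BLIT_SRC/BLIT_DST feature is absent; these gate
// the optional 00218/00223 errors expected for the blits below.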
bool usrc = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT);
bool udst = !ImageFormatAndFeaturesSupported(gpu(), f_unsigned, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT);
bool ssrc = !ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT);
bool sdst = !ImageFormatAndFeaturesSupported(gpu(), f_signed, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT);
bool fsrc = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT);
bool fdst = !ImageFormatAndFeaturesSupported(gpu(), f_float, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT);
bool d1dst = !ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT);
bool d2src = !ImageFormatAndFeaturesSupported(gpu(), f_depth2, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT);
VkImageObj unsigned_image(m_device);
unsigned_image.Init(64, 64, 1, f_unsigned, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(unsigned_image.initialized());
unsigned_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageObj signed_image(m_device);
signed_image.Init(64, 64, 1, f_signed, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(signed_image.initialized());
signed_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageObj float_image(m_device);
float_image.Init(64, 64, 1, f_float, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL,
0);
ASSERT_TRUE(float_image.initialized());
float_image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageObj depth_image(m_device);
depth_image.Init(64, 64, 1, f_depth, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL,
0);
ASSERT_TRUE(depth_image.initialized());
depth_image.SetLayout(VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageObj depth_image2(m_device);
depth_image2.Init(64, 64, 1, f_depth2, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(depth_image2.initialized());
depth_image2.SetLayout(VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageBlit blitRegion = {};
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcOffsets[0] = {0, 0, 0};
blitRegion.srcOffsets[1] = {64, 64, 1};
blitRegion.dstOffsets[0] = {0, 0, 0};
blitRegion.dstOffsets[1] = {32, 32, 1};
m_commandBuffer->begin();
// Unsigned int vs not an int
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230");
if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), float_image.image(),
float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230");
if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), unsigned_image.image(),
unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Signed int vs not an int
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229");
if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (fdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), float_image.image(),
float_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229");
if (fsrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), float_image.image(), float_image.Layout(), signed_image.image(),
signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Signed vs Unsigned int - generates both VUs
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230");
if (ssrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (udst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), signed_image.image(), signed_image.Layout(), unsigned_image.image(),
unsigned_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00229");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00230");
if (usrc) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (sdst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), unsigned_image.image(), unsigned_image.Layout(), signed_image.image(),
signed_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Depth vs any non-identical depth format
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00231");
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
if (d2src) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00218");
if (d1dst) m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), depth_image2.image(), depth_image2.Layout(), depth_image.image(),
depth_image.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, BlitImageFilters) {
bool cubic_support = false;
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, "VK_IMG_filter_cubic")) {
m_device_extension_names.push_back("VK_IMG_filter_cubic");
cubic_support = true;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkFormat fmt = VK_FORMAT_R8_UINT;
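// R8_UINT is used because UINT formats are not expected to advertise linear-filter support; the
// feature bits are still checked explicitly before each negative case below.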
if (!ImageFormatIsSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL)) {
printf("%s No R8_UINT format support - BlitImageFilters skipped.\n", kSkipPrefix);
return;
}
// Create 2D images
VkImageObj src2D(m_device);
VkImageObj dst2D(m_device);
src2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
dst2D.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(src2D.initialized());
ASSERT_TRUE(dst2D.initialized());
src2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
dst2D.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
// Create 3D image
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_3D;
ci.format = fmt;
ci.extent = {64, 64, 4};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj src3D(m_device);
src3D.init(&ci);
ASSERT_TRUE(src3D.initialized());
VkImageBlit blitRegion = {};
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcOffsets[0] = {0, 0, 0};
blitRegion.srcOffsets[1] = {48, 48, 1};
blitRegion.dstOffsets[0] = {0, 0, 0};
blitRegion.dstOffsets[1] = {64, 64, 1};
m_commandBuffer->begin();
// UINT format should not support linear filtering, but check to be sure
if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT)) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00235");
vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion,
VK_FILTER_LINEAR);
m_errorMonitor->VerifyFound();
}
if (cubic_support && !ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG)) {
// Invalid filter CUBIC_IMG
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00236");
vkCmdBlitImage(m_commandBuffer->handle(), src3D.image(), src3D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion,
VK_FILTER_CUBIC_IMG);
m_errorMonitor->VerifyFound();
// Invalid filter CUBIC_IMG + invalid 2D source image
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00236");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-filter-00237");
vkCmdBlitImage(m_commandBuffer->handle(), src2D.image(), src2D.Layout(), dst2D.image(), dst2D.Layout(), 1, &blitRegion,
VK_FILTER_CUBIC_IMG);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
}
TEST_F(VkLayerTest, BlitImageLayout) {
TEST_DESCRIPTION("Incorrect vkCmdBlitImage layouts");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
VkResult err;
VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM;
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
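// Several of the layout errors below are only reported at queue-submit time, so each case is
// recorded, submitted with this submit_info, verified, and the queue drained before the command
// buffer is reset and reused.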
// Create images
VkImageObj img_src_transfer(m_device);
VkImageObj img_dst_transfer(m_device);
VkImageObj img_general(m_device);
VkImageObj img_color(m_device);
img_src_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
img_dst_transfer.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
img_general.InitNoLayout(64, 64, 1, fmt, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
img_color.InitNoLayout(64, 64, 1, fmt,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(img_src_transfer.initialized());
ASSERT_TRUE(img_dst_transfer.initialized());
ASSERT_TRUE(img_general.initialized());
ASSERT_TRUE(img_color.initialized());
img_src_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
img_dst_transfer.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
img_general.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
img_color.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
VkImageBlit blit_region = {};
blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.srcSubresource.baseArrayLayer = 0;
blit_region.srcSubresource.layerCount = 1;
blit_region.srcSubresource.mipLevel = 0;
blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.dstSubresource.baseArrayLayer = 0;
blit_region.dstSubresource.layerCount = 1;
blit_region.dstSubresource.mipLevel = 0;
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.srcOffsets[1] = {48, 48, 1};
blit_region.dstOffsets[0] = {0, 0, 0};
blit_region.dstOffsets[1] = {64, 64, 1};
m_commandBuffer->begin();
// Illegal srcImageLayout
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00222");
vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
img_dst_transfer.image(), img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR);
m_errorMonitor->VerifyFound();
// Illegal dstImageLayout
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00227");
vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(),
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR);
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->reset(0);
m_commandBuffer->begin();
// Source image in invalid layout at start of the CB
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT, "layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL when first use is VK_IMAGE_LAYOUT_GENERAL");
vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_color.image(),
VK_IMAGE_LAYOUT_GENERAL, 1, &blit_region, VK_FILTER_LINEAR);
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->reset(0);
m_commandBuffer->begin();
// Destination image in invalid layout at start of the CB
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT, "layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL when first use is VK_IMAGE_LAYOUT_GENERAL");
vkCmdBlitImage(m_commandBuffer->handle(), img_color.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(),
img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR);
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
// Source image in invalid layout in the middle of CB
m_commandBuffer->reset(0);
m_commandBuffer->begin();
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = nullptr;
img_barrier.srcAccessMask = 0;
img_barrier.dstAccessMask = 0;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
img_barrier.image = img_general.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
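// The barrier transitions img_general to TRANSFER_DST_OPTIMAL within the command buffer, so the blit
// below that declares VK_IMAGE_LAYOUT_GENERAL as the source layout no longer matches the tracked
// layout and should trigger VUID-vkCmdBlitImage-srcImageLayout-00221.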
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImageLayout-00221");
vkCmdBlitImage(m_commandBuffer->handle(), img_general.image(), VK_IMAGE_LAYOUT_GENERAL, img_dst_transfer.image(),
img_dst_transfer.Layout(), 1, &blit_region, VK_FILTER_LINEAR);
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
// Destination image in invalid layout in the middle of CB
m_commandBuffer->reset(0);
m_commandBuffer->begin();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
img_barrier.image = img_dst_transfer.handle();
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImageLayout-00226");
vkCmdBlitImage(m_commandBuffer->handle(), img_src_transfer.image(), img_src_transfer.Layout(), img_dst_transfer.image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit_region, VK_FILTER_LINEAR);
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
}
TEST_F(VkLayerTest, BlitImageOffsets) {
ASSERT_NO_FATAL_FAILURE(Init());
VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM;
if (!ImageFormatAndFeaturesSupported(gpu(), fmt, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) {
printf("%s No blit feature bits - BlitImageOffsets skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_1D;
ci.format = fmt;
ci.extent = {64, 1, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj image_1D(m_device);
image_1D.init(&ci);
ASSERT_TRUE(image_1D.initialized());
ci.imageType = VK_IMAGE_TYPE_2D;
ci.extent = {64, 64, 1};
VkImageObj image_2D(m_device);
image_2D.init(&ci);
ASSERT_TRUE(image_2D.initialized());
ci.imageType = VK_IMAGE_TYPE_3D;
ci.extent = {64, 64, 64};
VkImageObj image_3D(m_device);
image_3D.init(&ci);
ASSERT_TRUE(image_3D.initialized());
VkImageBlit blit_region = {};
blit_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.srcSubresource.baseArrayLayer = 0;
blit_region.srcSubresource.layerCount = 1;
blit_region.srcSubresource.mipLevel = 0;
blit_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blit_region.dstSubresource.baseArrayLayer = 0;
blit_region.dstSubresource.layerCount = 1;
blit_region.dstSubresource.mipLevel = 0;
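// The cases below exercise VkImageBlit offset rules: 1D blits require y offsets of (0,1), 2D blits
// require z offsets of (0,1), and all offsets must lie within the corresponding image extents.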
m_commandBuffer->begin();
// 1D, with src/dest y offsets other than (0,1)
blit_region.srcOffsets[0] = {0, 1, 0};
blit_region.srcOffsets[1] = {30, 1, 1};
blit_region.dstOffsets[0] = {32, 0, 0};
blit_region.dstOffsets[1] = {64, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00245");
vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.dstOffsets[0] = {32, 1, 0};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00250");
vkCmdBlitImage(m_commandBuffer->handle(), image_1D.image(), image_1D.Layout(), image_1D.image(), image_1D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// 2D, with src/dest z offsets other than (0,1)
blit_region.srcOffsets[0] = {0, 0, 1};
blit_region.srcOffsets[1] = {24, 31, 1};
blit_region.dstOffsets[0] = {32, 32, 0};
blit_region.dstOffsets[1] = {64, 64, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00247");
vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.dstOffsets[0] = {32, 32, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstImage-00252");
vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_2D.image(), image_2D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Source offsets exceeding source image dimensions
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.srcOffsets[1] = {65, 64, 1}; // src x
blit_region.dstOffsets[0] = {0, 0, 0};
blit_region.dstOffsets[1] = {64, 64, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00243"); // x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region
vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.srcOffsets[1] = {64, 65, 1}; // src y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00244"); // y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region
vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.srcOffsets[0] = {0, 0, 65}; // src z
blit_region.srcOffsets[1] = {64, 64, 64};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcOffset-00246"); // z
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00215"); // src region
vkCmdBlitImage(m_commandBuffer->handle(), image_3D.image(), image_3D.Layout(), image_2D.image(), image_2D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Dest offsets exceeding destination image dimensions
blit_region.srcOffsets[0] = {0, 0, 0};
blit_region.srcOffsets[1] = {64, 64, 1};
blit_region.dstOffsets[0] = {96, 64, 32}; // dst x
blit_region.dstOffsets[1] = {64, 0, 33};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00248"); // x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region
vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.dstOffsets[0] = {0, 65, 32}; // dst y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00249"); // y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region
vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blit_region.dstOffsets[0] = {0, 64, 65}; // dst z
blit_region.dstOffsets[1] = {64, 0, 64};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-dstOffset-00251"); // z
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-pRegions-00216"); // dst region
vkCmdBlitImage(m_commandBuffer->handle(), image_2D.image(), image_2D.Layout(), image_3D.image(), image_3D.Layout(), 1,
&blit_region, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, MiscBlitImageTests) {
ASSERT_NO_FATAL_FAILURE(Init());
VkFormat f_color = VK_FORMAT_R32_SFLOAT; // Need features ..BLIT_SRC_BIT & ..BLIT_DST_BIT
if (!ImageFormatAndFeaturesSupported(gpu(), f_color, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_BLIT_SRC_BIT | VK_FORMAT_FEATURE_BLIT_DST_BIT)) {
printf("%s Requested format features unavailable - MiscBlitImageTests skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = f_color;
ci.extent = {64, 64, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// 2D color image
VkImageObj color_img(m_device);
color_img.init(&ci);
ASSERT_TRUE(color_img.initialized());
// 2D multi-sample image
ci.samples = VK_SAMPLE_COUNT_4_BIT;
VkImageObj ms_img(m_device);
ms_img.init(&ci);
ASSERT_TRUE(ms_img.initialized());
// 3D color image
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.imageType = VK_IMAGE_TYPE_3D;
ci.extent = {64, 64, 8};
VkImageObj color_3D_img(m_device);
color_3D_img.init(&ci);
ASSERT_TRUE(color_3D_img.initialized());
VkImageBlit blitRegion = {};
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcOffsets[0] = {0, 0, 0};
blitRegion.srcOffsets[1] = {16, 16, 1};
blitRegion.dstOffsets[0] = {32, 32, 0};
blitRegion.dstOffsets[1] = {64, 64, 1};
m_commandBuffer->begin();
// Blit with aspectMask errors
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00241");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-aspectMask-00242");
vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Blit with invalid src mip level
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.mipLevel = ci.mipLevels;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-srcSubresource-01705"); // invalid srcSubresource.mipLevel
// Redundant unavoidable errors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-srcOffset-00243"); // out-of-bounds srcOffset.x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-srcOffset-00244"); // out-of-bounds srcOffset.y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-srcOffset-00246"); // out-of-bounds srcOffset.z
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-pRegions-00215"); // region not contained within src image
vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Blit with invalid dst mip level
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.mipLevel = ci.mipLevels;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-dstSubresource-01706"); // invalid dstSubresource.mipLevel
// Redundant unavoidable errors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-dstOffset-00248"); // out-of-bounds dstOffset.x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-dstOffset-00249"); // out-of-bounds dstOffset.y
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-dstOffset-00251"); // out-of-bounds dstOffset.z
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-pRegions-00216"); // region not contained within dst image
vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Blit with invalid src array layer
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcSubresource.baseArrayLayer = ci.arrayLayers;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-srcSubresource-01707"); // invalid srcSubresource layer range
vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Blit with invalid dst array layer
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.baseArrayLayer = ci.arrayLayers;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-dstSubresource-01708"); // invalid dstSubresource layer range
vkCmdBlitImage(m_commandBuffer->handle(), color_img.image(), color_img.Layout(), color_img.image(), color_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blitRegion.dstSubresource.baseArrayLayer = 0;
// Blit multi-sample image
// TODO: redundant VUs - either the combined 00228 or the individual 00233 & 00234 should be eliminated.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00228");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-srcImage-00233");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00234");
vkCmdBlitImage(m_commandBuffer->handle(), ms_img.image(), ms_img.Layout(), ms_img.image(), ms_img.Layout(), 1, &blitRegion,
VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
// Blit 3D with baseArrayLayer != 0 or layerCount != 1
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdBlitImage-srcSubresource-01707"); // base+count > total layer count
vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(),
color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageBlit-srcImage-00240");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageSubresourceLayers-layerCount-01700"); // layer count == 0 (src)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageBlit-layerCount-00239"); // src/dst layer count mismatch
vkCmdBlitImage(m_commandBuffer->handle(), color_3D_img.image(), color_3D_img.Layout(), color_3D_img.image(),
color_3D_img.Layout(), 1, &blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, BlitToDepthImageTests) {
ASSERT_NO_FATAL_FAILURE(Init());
// Need feature ..BLIT_SRC_BIT but not ..BLIT_DST_BIT
// TODO: provide more choices here; supporting D32_SFLOAT as BLIT_DST isn't unheard of.
VkFormat f_depth = VK_FORMAT_D32_SFLOAT;
if (!ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_SRC_BIT) ||
ImageFormatAndFeaturesSupported(gpu(), f_depth, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_BLIT_DST_BIT)) {
printf("%s Requested format features unavailable - BlitToDepthImageTests skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = f_depth;
ci.extent = {64, 64, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// 2D depth image
VkImageObj depth_img(m_device);
depth_img.init(&ci);
ASSERT_TRUE(depth_img.initialized());
VkImageBlit blitRegion = {};
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcOffsets[0] = {0, 0, 0};
blitRegion.srcOffsets[1] = {16, 16, 1};
blitRegion.dstOffsets[0] = {32, 32, 0};
blitRegion.dstOffsets[1] = {64, 64, 1};
m_commandBuffer->begin();
// Blit depth image - has SRC_BIT but not DST_BIT
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBlitImage-dstImage-00223");
vkCmdBlitImage(m_commandBuffer->handle(), depth_img.image(), depth_img.Layout(), depth_img.image(), depth_img.Layout(), 1,
&blitRegion, VK_FILTER_NEAREST);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, MinImageTransferGranularity) {
TEST_DESCRIPTION("Tests for validation of Queue Family property minImageTransferGranularity.");
ASSERT_NO_FATAL_FAILURE(Init());
auto queue_family_properties = m_device->phy().queue_properties();
auto large_granularity_family =
std::find_if(queue_family_properties.begin(), queue_family_properties.end(), [](VkQueueFamilyProperties family_properties) {
VkExtent3D family_granularity = family_properties.minImageTransferGranularity;
// We need a queue family that supports copy operations and has a large enough minImageTransferGranularity for the tests
// below to make sense.
return (family_properties.queueFlags & VK_QUEUE_TRANSFER_BIT || family_properties.queueFlags & VK_QUEUE_GRAPHICS_BIT ||
family_properties.queueFlags & VK_QUEUE_COMPUTE_BIT) &&
family_granularity.depth >= 4 && family_granularity.width >= 4 && family_granularity.height >= 4;
});
if (large_granularity_family == queue_family_properties.end()) {
printf("%s No queue family has a large enough granularity for this test to be meaningful, skipping test\n", kSkipPrefix);
return;
}
const size_t queue_family_index = std::distance(queue_family_properties.begin(), large_granularity_family);
VkExtent3D granularity = queue_family_properties[queue_family_index].minImageTransferGranularity;
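// Transfer offsets and extents on this queue family must be multiples of minImageTransferGranularity
// (an extent may fall short only where it reaches the edge of the image), so the odd values used
// below are expected to trigger the granularity VUs.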
VkCommandPoolObj command_pool(m_device, queue_family_index, 0);
// Create two images of different types and try to copy between them
VkImage srcImage;
VkImage dstImage;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_3D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = granularity.width * 2;
image_create_info.extent.height = granularity.height * 2;
image_create_info.extent.depth = granularity.depth * 2;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.flags = 0;
VkImageObj src_image_obj(m_device);
src_image_obj.init(&image_create_info);
ASSERT_TRUE(src_image_obj.initialized());
srcImage = src_image_obj.handle();
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageObj dst_image_obj(m_device);
dst_image_obj.init(&image_create_info);
ASSERT_TRUE(dst_image_obj.initialized());
dstImage = dst_image_obj.handle();
VkCommandBufferObj command_buffer(m_device, &command_pool);
ASSERT_TRUE(command_buffer.initialized());
command_buffer.begin();
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset.x = 0;
copyRegion.srcOffset.y = 0;
copyRegion.srcOffset.z = 0;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset.x = 0;
copyRegion.dstOffset.y = 0;
copyRegion.dstOffset.z = 0;
copyRegion.extent.width = granularity.width;
copyRegion.extent.height = granularity.height;
copyRegion.extent.depth = granularity.depth;
// Introduce failure by setting srcOffset to a bad granularity value
copyRegion.srcOffset.y = 3;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity
command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// Introduce failure by setting extent to a granularity value that is bad
// for both the source and destination image.
copyRegion.srcOffset.y = 0;
copyRegion.extent.width = 3;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity
command_buffer.CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// Now do some buffer/image copies
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_src_and_dst(*m_device, 8 * granularity.height * granularity.width * granularity.depth, reqs);
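// Buffer sized to comfortably hold one granularity-sized region of B8G8R8A8 texels (4 bytes each),
// with room to spare.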
VkBufferImageCopy region = {};
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.height = granularity.height;
region.imageExtent.width = granularity.width;
region.imageExtent.depth = granularity.depth;
region.imageOffset.x = 0;
region.imageOffset.y = 0;
region.imageOffset.z = 0;
// Introduce failure by setting imageExtent to a bad granularity value
region.imageExtent.width = 3;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
vkCmdCopyImageToBuffer(command_buffer.handle(), srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer.handle(), 1, &region);
m_errorMonitor->VerifyFound();
region.imageExtent.width = granularity.width;
// Introduce failure by setting imageOffset to a bad granularity value
region.imageOffset.z = 3;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity
vkCmdCopyBufferToImage(command_buffer.handle(), buffer.handle(), dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
m_errorMonitor->VerifyFound();
command_buffer.end();
}
TEST_F(VkLayerTest, MismatchedQueueFamiliesOnSubmit) {
TEST_DESCRIPTION(
"Submit command buffer created using one queue family and attempt to submit them on a queue created in a different queue "
"family.");
ASSERT_NO_FATAL_FAILURE(Init()); // assumes it initializes all queue families on vkCreateDevice
// This test is meaningless unless we have multiple queue families
auto queue_family_properties = m_device->phy().queue_properties();
std::vector<uint32_t> queue_families;
for (uint32_t i = 0; i < queue_family_properties.size(); ++i)
if (queue_family_properties[i].queueCount > 0) queue_families.push_back(i);
if (queue_families.size() < 2) {
printf("%s Device only has one queue family; skipped.\n", kSkipPrefix);
return;
}
const uint32_t queue_family = queue_families[0];
const uint32_t other_queue_family = queue_families[1];
VkQueue other_queue;
vkGetDeviceQueue(m_device->device(), other_queue_family, 0, &other_queue);
VkCommandPoolObj cmd_pool(m_device, queue_family);
VkCommandBufferObj cmd_buff(m_device, &cmd_pool);
cmd_buff.begin();
cmd_buff.end();
// Submit on the wrong queue
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmd_buff.handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00074");
vkQueueSubmit(other_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassAttachmentIndexOutOfRange) {
ASSERT_NO_FATAL_FAILURE(Init());
// There are no attachments, but refer to attachment 0.
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, subpasses, 0, nullptr};
VkRenderPass rp;
// "... must be less than the total number of attachments ..."
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassCreateInfo-attachment-00834");
vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassAttachmentUsedTwiceColor) {
ASSERT_NO_FATAL_FAILURE(Init());
TEST_DESCRIPTION("Attachment is used simultaneously as two color attachments. This is not acceptable.");
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference refs[] = {
{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 2, refs, nullptr, nullptr, 0, nullptr},
};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
VkRenderPass rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"subpass 0 already uses attachment 0 as a color attachment");
vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassAttachmentUsedTwiceMismatchingLayout) {
ASSERT_NO_FATAL_FAILURE(Init());
TEST_DESCRIPTION("Attachment is used simultaneously as color and input. The layouts differ, which is not acceptable.");
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
};
VkAttachmentReference color_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkAttachmentReference input_ref = {0, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input_ref, 1, &color_ref, nullptr, nullptr, 0, nullptr},
};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
VkRenderPass rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, RenderPassAttachmentUsedTwiceOK) {
ASSERT_NO_FATAL_FAILURE(Init());
TEST_DESCRIPTION("Attachment is used simultaneously as color and input, with the same layout. This is OK.");
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &ref, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
VkRenderPass rp;
m_errorMonitor->ExpectSuccess();
vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyNotFound();
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, RenderPassAttachmentUsedTwicePreserveAndColor) {
ASSERT_NO_FATAL_FAILURE(Init());
TEST_DESCRIPTION("Attachment is used simultaneously as color and preserve. This is not acceptable.");
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_GENERAL};
uint32_t preserve_attachment = 0;
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 1, &preserve_attachment},
};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 0, nullptr};
VkRenderPass rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pPreserveAttachments-00854");
vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassPipelineSubpassMismatch) {
TEST_DESCRIPTION("Use a pipeline for the wrong subpass in a render pass instance");
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with two subpasses, both writing the same attachment.
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkSubpassDependency dep = {0,
1,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 2, subpasses, 1, &dep};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
char const *vsSource =
"#version 450\n"
"void main() { gl_Position = vec4(1); }\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 color;\n"
"void main() { color = vec4(1); }\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_viewports.push_back(viewport);
pipe.SetViewport(m_viewports);
VkRect2D rect = {};
m_scissors.push_back(rect);
pipe.SetScissor(m_scissors);
const VkPipelineLayoutObj pl(m_device);
pipe.CreateVKPipeline(pl.handle(), rp);
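// The pipeline above is created against subpass 0 of the render pass; using it in subpass 1 should be flagged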
m_commandBuffer->begin();
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
rp,
fb,
{{
0,
0,
},
{32, 32}},
0,
nullptr};
// subtest 1: bind in the wrong subpass
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1");
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
// subtest 2: bind in correct subpass, then transition to next subpass
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "built for subpass 0 but used in subpass 1");
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, RenderPassBarrierConflicts) {
TEST_DESCRIPTION("Add a pipeline barrier within a subpass that has conflicting state");
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with a single subpass that declared a self-dependency
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep};
VkRenderPass rp;
VkRenderPass rp_noselfdep;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
rpci.dependencyCount = 0;
rpci.pDependencies = nullptr;
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp_noselfdep);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pDependencies-01172");
m_commandBuffer->begin();
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
rp_noselfdep,
fb,
{{
0,
0,
},
{32, 32}},
0,
nullptr};
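// First case: a barrier inside a render pass instance whose subpass has no self-dependency at all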
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
VkMemoryBarrier mem_barrier = {};
mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
mem_barrier.pNext = NULL;
mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
mem_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 1,
&mem_barrier, 0, nullptr, 0, nullptr);
m_errorMonitor->VerifyFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
rpbi.renderPass = rp;
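// Remaining cases run inside the render pass that does declare a self-dependency and violate it in various ways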
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.image = image.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
// Mis-match src stage mask
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01173");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
// Now mis-match dst stage mask
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstStageMask-01174");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_HOST_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
// Set srcQueueFamilyIndex to something other than IGNORED
img_barrier.srcQueueFamilyIndex = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcQueueFamilyIndex-01182");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
// Mis-match mem barrier src access mask
mem_barrier = {};
mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
mem_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
mem_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcAccessMask-01175");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0,
nullptr);
m_errorMonitor->VerifyFound();
// Mis-match mem barrier dst access mask. Also set srcAccessMask to 0, which should not cause an error
mem_barrier.srcAccessMask = 0;
mem_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstAccessMask-01176");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0,
nullptr);
m_errorMonitor->VerifyFound();
// Mis-match image barrier src access mask
img_barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcAccessMask-01175");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Mis-match image barrier dst access mask
img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dstAccessMask-01176");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Mis-match dependencyFlags
img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-dependencyFlags-01177");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0 /* wrong */, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
// Send non-zero bufferMemoryBarrierCount
// Construct a valid BufferMemoryBarrier to avoid any parameter errors
// First we need a valid buffer to reference
VkBufferObj buffer;
VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
buffer.init_as_src_and_dst(*m_device, 256, mem_reqs);
VkBufferMemoryBarrier bmb = {};
bmb.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
bmb.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
bmb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
bmb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bmb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
bmb.buffer = buffer.handle();
bmb.offset = 0;
bmb.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-bufferMemoryBarrierCount-01178");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &bmb, 0,
nullptr);
m_errorMonitor->VerifyFound();
// Add image barrier w/ image handle that's not in framebuffer
VkImageObj lone_image(m_device);
lone_image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
img_barrier.image = lone_image.handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Have image barrier with mis-matched layouts
img_barrier.image = image.handle();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01181");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-oldLayout-01180");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
vkDestroyRenderPass(m_device->device(), rp_noselfdep, nullptr);
}
TEST_F(VkLayerTest, InvalidSecondaryCommandBufferBarrier) {
TEST_DESCRIPTION("Add an invalid image barrier in a secondary command buffer");
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with a single subpass that declared a self-dependency
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_ACCESS_HOST_WRITE_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Second image that img_barrier will incorrectly use
VkImageObj image2(m_device);
image2.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
rp,
fb,
{{
0,
0,
},
{32, 32}},
0,
nullptr};
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
nullptr,
rp,
0,
VK_NULL_HANDLE, // Set the FB handle to VK_NULL_HANDLE intentionally to flush out any related errors
VK_FALSE,
0,
0};
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
&cbii};
vkBeginCommandBuffer(secondary.handle(), &cbbi);
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.image = image2.handle(); // Image mis-matches with FB image
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
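// The offending barrier is recorded into the secondary command buffer; the error is expected at vkCmdExecuteCommands time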
vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
secondary.end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, ImageBarrierSubpassConflict) {
TEST_DESCRIPTION("Check case where subpass index references different image from image barrier");
ASSERT_NO_FATAL_FAILURE(Init());
// Create an RP/FB combo where the subpass references the wrong attachment index; this is the 2nd half of "VUID-vkCmdPipelineBarrier-image-01179"
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
// ref attachment points to wrong attachment index compared to img_barrier below
VkAttachmentReference ref = {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attach, 1, subpasses, 1, &dep};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkImageObj image2(m_device);
image2.InitNoLayout(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView2 = image2.targetView(VK_FORMAT_R8G8B8A8_UNORM);
// Both views go into the framebuffer, but the subpass only references attachment index 1 (imageView2)
VkImageView iv_array[2] = {imageView, imageView2};
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 2, iv_array, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
rp,
fb,
{{
0,
0,
},
{32, 32}},
0,
nullptr};
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.image = image.handle(); /* barrier references image from attachment index 0 */
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-image-01179");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, TemporaryExternalSemaphore) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external semaphore import and export capability
VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr,
handle_type};
VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR =
(PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp);
if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a semaphore to export payload from
VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type};
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0};
VkSemaphore export_semaphore;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore);
ASSERT_VK_SUCCESS(err);
// Create a semaphore to import payload into
sci.pNext = nullptr;
VkSemaphore import_semaphore;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
#ifdef _WIN32
// Export semaphore payload to an opaque handle
HANDLE handle = nullptr;
VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore,
handle_type};
auto vkGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR");
err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreWin32HandleInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportSemaphoreWin32HandleKHR =
(PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR");
err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#else
// Export semaphore payload to an opaque handle
int fd = 0;
VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type};
auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR");
err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above *temporarily*
VkImportSemaphoreFdInfoKHR ihi = {VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore,
VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
err = vkImportSemaphoreFdKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#endif
// Wait on the imported semaphore twice in vkQueueSubmit, the second wait should be an error
VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkSubmitInfo si[] = {
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
};
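// The temporary import is consumed by the first wait, so the second wait has no way to be signaled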
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled");
vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Wait on the imported semaphore twice in vkQueueBindSparse, the second wait should be an error
VkBindSparseInfo bi[] = {
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &import_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "has no way to be signaled");
vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Cleanup
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vkDestroySemaphore(m_device->device(), export_semaphore, nullptr);
vkDestroySemaphore(m_device->device(), import_semaphore, nullptr);
}
TEST_F(VkLayerTest, TemporaryExternalFence) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external fence instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for external fence device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external fence import and export capability
VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type};
VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR");
vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp);
if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
// Create a fence to export payload from
VkFence export_fence;
{
VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type};
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0};
err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence);
ASSERT_VK_SUCCESS(err);
}
// Create a fence to import payload into
VkFence import_fence;
{
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence);
ASSERT_VK_SUCCESS(err);
}
#ifdef _WIN32
// Export fence payload to an opaque handle
HANDLE handle = nullptr;
{
VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceWin32HandleKHR =
(PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR");
err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceWin32HandleInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR,
nullptr,
import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR,
handle_type,
handle,
nullptr};
auto vkImportFenceWin32HandleKHR =
(PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR");
err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#else
// Export fence payload to an opaque handle
int fd = 0;
{
VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR");
err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence,
VK_FENCE_IMPORT_TEMPORARY_BIT_KHR, handle_type, fd};
auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR");
err = vkImportFenceFdKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#endif
// Undo the temporary import
vkResetFences(m_device->device(), 1, &import_fence);
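// Resetting removes the temporarily imported payload, so the fence behaves like a regular unsignaled fence again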
// Signal the previously imported fence twice, the second signal should produce a validation error
vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is already in use by another submission.");
vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
m_errorMonitor->VerifyFound();
// Cleanup
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vkDestroyFence(m_device->device(), export_fence, nullptr);
vkDestroyFence(m_device->device(), import_fence, nullptr);
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferBarrier) {
TEST_DESCRIPTION("Add a pipeline barrier in a secondary command buffer");
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with a single subpass that declared a self-dependency
VkAttachmentDescription attach[] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpasses[] = {
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_ACCESS_HOST_WRITE_BIT,
VK_ACCESS_SHADER_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dep};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
nullptr,
rp,
fb,
{{
0,
0,
},
{32, 32}},
0,
nullptr};
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkCommandBufferInheritanceInfo cbii = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
nullptr,
rp,
0,
VK_NULL_HANDLE, // Set the FB handle to VK_NULL_HANDLE intentionally to flush out any related errors
VK_FALSE,
0,
0};
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
&cbii};
vkBeginCommandBuffer(secondary.handle(), &cbbi);
VkMemoryBarrier mem_barrier = {};
mem_barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
mem_barrier.pNext = NULL;
mem_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
mem_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 1, &mem_barrier, 0, nullptr, 0, nullptr);
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.image = image.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vkCmdPipelineBarrier(secondary.handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
secondary.end();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkQueueWaitIdle(m_device->m_queue);
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, RenderPassInvalidRenderArea) {
TEST_DESCRIPTION("Generate INVALID_RENDER_AREA error by beginning renderpass with extent outside of framebuffer");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Cannot execute a render pass with renderArea not within the bound of the framebuffer.");
// Framebuffer for render target is 256x256, exceed that for INVALID_RENDER_AREA
m_renderPassBeginInfo.renderArea.extent.width = 257;
m_renderPassBeginInfo.renderArea.extent.height = 257;
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DisabledIndependentBlend) {
TEST_DESCRIPTION(
"Generate INDEPENDENT_BLEND by disabling independent blend and then specifying different blend states for two "
"attachments");
VkPhysicalDeviceFeatures features = {};
features.independentBlend = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(Init(&features));
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Invalid Pipeline CreateInfo: If independent blend feature not enabled, all elements of pAttachments must be identical");
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkPipelineObj pipeline(m_device);
// Create a renderPass with two color attachments
VkAttachmentReference attachments[2] = {};
attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL;
attachments[1].attachment = 1;
attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.pColorAttachments = attachments;
subpass.colorAttachmentCount = 2;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 2;
VkAttachmentDescription attach_desc[2] = {};
attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM;
attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM;
attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass renderpass;
vkCreateRenderPass(m_device->device(), &rpci, NULL, &renderpass);
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
pipeline.AddShader(&vs);
VkPipelineColorBlendAttachmentState att_state1 = {}, att_state2 = {};
att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
att_state1.blendEnable = VK_TRUE;
att_state2.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
att_state2.blendEnable = VK_FALSE;
pipeline.AddColorAttachment(0, att_state1);
pipeline.AddColorAttachment(1, att_state2);
pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass);
m_errorMonitor->VerifyFound();
vkDestroyRenderPass(m_device->device(), renderpass, NULL);
}
// Is the Pipeline compatible with the expectations of the Renderpass/subpasses?
TEST_F(VkLayerTest, PipelineRenderpassCompatibility) {
TEST_DESCRIPTION(
"Create a graphics pipeline that is incompatible with the requirements of its contained Renderpass/subpasses.");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorSetObj ds_obj(m_device);
ds_obj.AppendDummy();
ds_obj.CreateVKDescriptorSet(m_commandBuffer);
VkShaderObj vs_obj(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkPipelineColorBlendAttachmentState att_state1 = {};
att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
att_state1.blendEnable = VK_TRUE;
VkRenderpassObj rp_obj(m_device);
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00753");
VkPipelineObj pipeline(m_device);
pipeline.AddShader(&vs_obj);
pipeline.AddColorAttachment(0, att_state1);
VkGraphicsPipelineCreateInfo info = {};
pipeline.InitGraphicsPipelineCreateInfo(&info);
info.pColorBlendState = nullptr;
pipeline.CreateVKPipeline(ds_obj.GetPipelineLayout(), rp_obj.handle(), &info);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, CreateRenderPassAttachments) {
TEST_DESCRIPTION(
"Ensure that CreateRenderPass produces the expected validation errors when a subpass's attachments violate the valid usage "
"conditions.");
ASSERT_NO_FATAL_FAILURE(Init());
std::vector<VkAttachmentDescription> attachments = {
// input attachments
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
// color attachments
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
// depth attachment
{0, VK_FORMAT_D24_UNORM_S8_UINT, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL},
// resolve attachment
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
// preserve attachments
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_4_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
std::vector<VkAttachmentReference> input = {
{0, VK_IMAGE_LAYOUT_GENERAL},
};
std::vector<VkAttachmentReference> color = {
{1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{2, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
VkAttachmentReference depth = {3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
std::vector<VkAttachmentReference> resolve = {
{4, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
};
std::vector<uint32_t> preserve = {5};
VkSubpassDescription subpass = {0,
VK_PIPELINE_BIND_POINT_GRAPHICS,
(uint32_t)input.size(),
input.data(),
(uint32_t)color.size(),
color.data(),
resolve.data(),
&depth,
(uint32_t)preserve.size(),
preserve.data()};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
(uint32_t)attachments.size(),
attachments.data(),
1,
&subpass,
0,
nullptr};
VkRenderPass rp;
VkResult err;
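// Each subtest below perturbs one field of the subpass/attachment descriptions, expects the matching VUID, then restores it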
// Test too many color attachments
{
std::vector<VkAttachmentReference> too_many_colors(m_device->props.limits.maxColorAttachments + 1, color[0]);
subpass.colorAttachmentCount = (uint32_t)too_many_colors.size();
subpass.pColorAttachments = too_many_colors.data();
subpass.pResolveAttachments = NULL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-colorAttachmentCount-00845");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
subpass.colorAttachmentCount = (uint32_t)color.size();
subpass.pColorAttachments = color.data();
subpass.pResolveAttachments = resolve.data();
}
// Test sample count mismatch between color buffers
attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_8_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-samples-parameter");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[subpass.pColorAttachments[1].attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;
// Test sample count mismatch between color buffers and depth buffer
attachments[subpass.pDepthStencilAttachment->attachment].samples = VK_SAMPLE_COUNT_8_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-samples-parameter");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[subpass.pDepthStencilAttachment->attachment].samples = attachments[subpass.pColorAttachments[0].attachment].samples;
// Test resolve attachment with UNUSED color attachment
color[0].attachment = VK_ATTACHMENT_UNUSED;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00847");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
color[0].attachment = 1;
// Test resolve from a single-sampled color attachment
attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;
attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_1_BIT; // avoid mismatch (00337)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00848");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[subpass.pColorAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
attachments[subpass.pColorAttachments[1].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
// Test resolve to a multi-sampled resolve attachment
attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_4_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00849");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[subpass.pResolveAttachments[0].attachment].samples = VK_SAMPLE_COUNT_1_BIT;
// Test with color/resolve format mismatch
attachments[subpass.pColorAttachments[0].attachment].format = VK_FORMAT_R8G8B8A8_SRGB;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pResolveAttachments-00850");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[subpass.pColorAttachments[0].attachment].format = attachments[subpass.pResolveAttachments[0].attachment].format;
// Test for UNUSED preserve attachments
preserve[0] = VK_ATTACHMENT_UNUSED;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-attachment-00853");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
preserve[0] = 5;
// Test for preserve attachments used elsewhere in the subpass
color[0].attachment = preserve[0];
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pPreserveAttachments-00854");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
color[0].attachment = 1;
// test for layout mismatch between input attachment and color attachment
input[0].attachment = color[0].attachment;
input[0].layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
input[0].attachment = 0;
input[0].layout = VK_IMAGE_LAYOUT_GENERAL;
// test for layout mismatch between input attachment and depth attachment
input[0].attachment = depth.attachment;
input[0].layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-layout-00855");
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
input[0].attachment = 0;
input[0].layout = VK_IMAGE_LAYOUT_GENERAL;
// Test for attachment used first as input with loadOp=CLEAR
{
std::vector<VkSubpassDescription> subpasses = {subpass, subpass, subpass};
subpasses[0].inputAttachmentCount = 0;
subpasses[1].inputAttachmentCount = 0;
attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
VkRenderPassCreateInfo rpci_multipass = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr,
0,
(uint32_t)attachments.size(),
attachments.data(),
(uint32_t)subpasses.size(),
subpasses.data(),
0,
nullptr};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-loadOp-00846");
err = vkCreateRenderPass(m_device->device(), &rpci_multipass, nullptr, &rp);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) vkDestroyRenderPass(m_device->device(), rp, nullptr);
attachments[input[0].attachment].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
}
}
TEST_F(VkLayerTest, FramebufferCreateErrors) {
TEST_DESCRIPTION(
"Hit errors when attempting to create a framebuffer :\n"
" 1. Mismatch between framebuffer & renderPass attachmentCount\n"
" 2. Use a color image as depthStencil attachment\n"
" 3. Mismatch framebuffer & renderPass attachment formats\n"
" 4. Mismatch framebuffer & renderPass attachment #samples\n"
" 5. Framebuffer attachment w/ non-1 mip-levels\n"
" 6. Framebuffer attachment where dimensions don't match\n"
" 7. Framebuffer attachment where dimensions don't match\n"
" 8. Framebuffer attachment w/o identity swizzle\n"
" 9. framebuffer dimensions exceed physical device limits\n");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-attachmentCount-00876");
// Create a renderPass with a single color attachment
VkAttachmentReference attach = {};
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.pColorAttachments = &attach;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
VkImageView ivs[2];
ivs[0] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM);
ivs[1] = m_renderTargets[0]->targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fb_info = {};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.pNext = NULL;
fb_info.renderPass = rp;
// Set mis-matching attachmentCount
fb_info.attachmentCount = 2;
fb_info.pAttachments = ivs;
fb_info.width = 100;
fb_info.height = 100;
fb_info.layers = 1;
VkFramebuffer fb;
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyRenderPass(m_device->device(), rp, NULL);
// Create a renderPass with a depth-stencil attachment created with
// IMAGE_USAGE_COLOR_ATTACHMENT
// Add our color attachment to pDepthStencilAttachment
subpass.pDepthStencilAttachment = &attach;
subpass.pColorAttachments = NULL;
VkRenderPass rp_ds;
err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp_ds);
ASSERT_VK_SUCCESS(err);
// Set correct attachment count, but attachment has COLOR usage bit set
fb_info.attachmentCount = 1;
fb_info.renderPass = rp_ds;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00878");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyRenderPass(m_device->device(), rp_ds, NULL);
// Create new renderpass with alternate attachment format from fb
attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM;
subpass.pDepthStencilAttachment = NULL;
subpass.pColorAttachments = &attach;
err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
// Cause error due to mis-matched formats between rp & fb
// rp attachment 0 now has RGBA8 but corresponding fb attach is BGRA8
fb_info.renderPass = rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00880");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyRenderPass(m_device->device(), rp, NULL);
// Create new renderpass with alternate sample count from fb
attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
attach_desc.samples = VK_SAMPLE_COUNT_4_BIT;
err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
// Cause error due to mis-matched sample count between rp & fb
fb_info.renderPass = rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00881");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyRenderPass(m_device->device(), rp, NULL);
{
// Create an image with 2 mip levels.
VkImageObj image(m_device);
image.Init(128, 128, 2, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
// Create an image view with two mip levels.
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_B8G8R8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
// Set level count to 2 (only 1 is allowed for FB attachment)
ivci.subresourceRange.levelCount = 2;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
ASSERT_VK_SUCCESS(err);
// Re-create renderpass to have matching sample count
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
fb_info.renderPass = rp;
fb_info.pAttachments = &view;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00883");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyImageView(m_device->device(), view, NULL);
}
// Update view to original color buffer and grow FB dimensions too big
fb_info.pAttachments = ivs;
fb_info.height = 1024;
fb_info.width = 1024;
fb_info.layers = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
{
// Create an image with one mip level.
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
// Create view attachment with non-identity swizzle
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_B8G8R8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
ivci.components.r = VK_COMPONENT_SWIZZLE_G;
ivci.components.g = VK_COMPONENT_SWIZZLE_R;
ivci.components.b = VK_COMPONENT_SWIZZLE_A;
ivci.components.a = VK_COMPONENT_SWIZZLE_B;
err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
ASSERT_VK_SUCCESS(err);
fb_info.pAttachments = &view;
fb_info.height = 100;
fb_info.width = 100;
fb_info.layers = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00884");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyImageView(m_device->device(), view, NULL);
}
// reset attachment to color attachment
fb_info.pAttachments = ivs;
// Request fb that exceeds max width
fb_info.width = m_device->props.limits.maxFramebufferWidth + 1;
fb_info.height = 100;
fb_info.layers = 1;
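// The requested width also exceeds the attachment's extent, so the attachment-dimensions VUID (00882) is expected as well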
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00886");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
// and width=0
fb_info.width = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-width-00885");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
// Request fb that exceeds max height
fb_info.width = 100;
fb_info.height = m_device->props.limits.maxFramebufferHeight + 1;
fb_info.layers = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00888");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
// and height=0
fb_info.height = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-height-00887");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
// Request fb that exceeds max layers
fb_info.width = 100;
fb_info.height = 100;
fb_info.layers = m_device->props.limits.maxFramebufferLayers + 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00890");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-pAttachments-00882");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
// and layers=0
fb_info.layers = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkFramebufferCreateInfo-layers-00889");
err = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkLayerTest, DynamicDepthBiasNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Depth Bias dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic depth bias
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic depth bias state not set for this command buffer");
VKTriangleTest(BsoFailDepthBias);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicLineWidthNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Line Width dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic line width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic line width state not set for this command buffer");
VKTriangleTest(BsoFailLineWidth);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicViewportNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Viewport dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic viewport state
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic viewport(s) 0 are used by pipeline state object, but were not provided");
VKTriangleTest(BsoFailViewport);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicScissorNotBound) {
TEST_DESCRIPTION("Run a simple draw calls to validate failure when Scissor dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic scissor state
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic scissor(s) 0 are used by pipeline state object, but were not provided");
VKTriangleTest(BsoFailScissor);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicBlendConstantsNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Blend Constants dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic blend constant state
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic blend constants state not set for this command buffer");
VKTriangleTest(BsoFailBlend);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicDepthBoundsNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Depth Bounds dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
if (!m_device->phy().features().depthBounds) {
printf("%s Device does not support depthBounds test; skipped.\n", kSkipPrefix);
return;
}
// Dynamic depth bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic depth bounds state not set for this command buffer");
VKTriangleTest(BsoFailDepthBounds);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicStencilReadNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Stencil Read dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic stencil read mask
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil read mask state not set for this command buffer");
VKTriangleTest(BsoFailStencilReadMask);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicStencilWriteNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Stencil Write dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic stencil write mask
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil write mask state not set for this command buffer");
VKTriangleTest(BsoFailStencilWriteMask);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DynamicStencilRefNotBound) {
TEST_DESCRIPTION(
"Run a simple draw calls to validate failure when Stencil Ref dynamic state is required but not correctly bound.");
ASSERT_NO_FATAL_FAILURE(Init());
// Dynamic stencil reference
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic stencil reference state not set for this command buffer");
VKTriangleTest(BsoFailStencilReference);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, IndexBufferNotBound) {
TEST_DESCRIPTION("Run an indexed draw call without an index buffer bound.");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Index buffer object not bound to this command buffer when Indexed ");
VKTriangleTest(BsoFailIndexBuffer);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, IndexBufferBadSize) {
TEST_DESCRIPTION("Run indexed draw call with bad index buffer size.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
VKTriangleTest(BsoFailIndexBufferBadSize);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, IndexBufferBadOffset) {
TEST_DESCRIPTION("Run indexed draw call with bad index buffer offset.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
VKTriangleTest(BsoFailIndexBufferBadOffset);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, IndexBufferBadBindSize) {
TEST_DESCRIPTION("Run bind index buffer with a size greater than the index buffer.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
VKTriangleTest(BsoFailIndexBufferBadMapSize);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, IndexBufferBadBindOffset) {
TEST_DESCRIPTION("Run bind index buffer with an offset greater than the size of the index buffer.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdDrawIndexed() index size ");
VKTriangleTest(BsoFailIndexBufferBadMapOffset);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CommandBufferTwoSubmits) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"was begun w/ VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// We luck out b/c by default the framework creates CB w/ the
// VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set
m_commandBuffer->begin();
m_commandBuffer->ClearAllBuffers(m_renderTargets, m_clear_color, nullptr, m_depth_clear_color, m_stencil_clear_color);
m_commandBuffer->end();
// Bypass framework since it does the waits automatically
VkResult err = VK_SUCCESS;
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
vkQueueWaitIdle(m_device->m_queue);
// Cause validation error by re-submitting cmd buffer that should only be
// submitted once
err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, AllocDescriptorFromEmptyPool) {
TEST_DESCRIPTION("Attempt to allocate more sets and descriptors than descriptor pool has available.");
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// This test is valid for Vulkan 1.0 only -- skip if device has an API version greater than 1.0.
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
printf("%s Device has apiVersion greater than 1.0 -- skipping Descriptor Set checks.\n", kSkipPrefix);
return;
}
// Create Pool w/ only Sampler descriptors and maxSets = 1, then try to alloc too many sets
// and a Uniform Buffer descriptor from it
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count.descriptorCount = 2;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.flags = 0;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding_samp = {};
dsl_binding_samp.binding = 0;
dsl_binding_samp.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dsl_binding_samp.descriptorCount = 1;
dsl_binding_samp.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding_samp.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout_samp(m_device, {dsl_binding_samp});
// Try to allocate 2 sets when pool only has 1 set
VkDescriptorSet descriptor_sets[2];
VkDescriptorSetLayout set_layouts[2] = {ds_layout_samp.handle(), ds_layout_samp.handle()};
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 2;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = set_layouts;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306");
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptor_sets);
m_errorMonitor->VerifyFound();
alloc_info.descriptorSetCount = 1;
// Create layout w/ descriptor type not available in pool
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding});
VkDescriptorSet descriptor_set;
alloc_info.descriptorSetCount = 1;
alloc_info.pSetLayouts = &ds_layout_ub.handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307");
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, FreeDescriptorFromOneShotPool) {
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeDescriptorSets-descriptorPool-00312");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.flags = 0;
// Not specifying VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT means
// app can only call vkResetDescriptorPool on this pool.
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
err = vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidDescriptorPool) {
// Attempt to clear Descriptor Pool with bad object.
// ObjectTracker should catch this.
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-parameter");
uint64_t fake_pool_handle = 0xbaad6001;
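// Non-dispatchable handles like VkDescriptorPool are 64-bit values, so an arbitrary uint64_t can be reinterpreted as a bogus handle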
VkDescriptorPool bad_pool = reinterpret_cast<VkDescriptorPool &>(fake_pool_handle);
vkResetDescriptorPool(device(), bad_pool, 0);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidDescriptorSet) {
// Attempt to bind an invalid Descriptor Set to a valid Command Buffer
// ObjectTracker should catch this.
// Create a valid cmd buffer
// call vkCmdBindDescriptorSets w/ false Descriptor Set
uint64_t fake_set_handle = 0xbaad6001;
VkDescriptorSet bad_set = reinterpret_cast<VkDescriptorSet &>(fake_set_handle);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-pDescriptorSets-parameter");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorSetLayoutBinding layout_binding = {};
layout_binding.binding = 0;
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
layout_binding.descriptorCount = 1;
layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
layout_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding});
const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout});
m_commandBuffer->begin();
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &bad_set, 0,
NULL);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, InvalidDescriptorSetLayout) {
// Attempt to create a Pipeline Layout with an invalid Descriptor Set Layout.
// ObjectTracker should catch this.
uint64_t fake_layout_handle = 0xbaad6001;
VkDescriptorSetLayout bad_layout = reinterpret_cast<VkDescriptorSetLayout &>(fake_layout_handle);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-parameter");
ASSERT_NO_FATAL_FAILURE(Init());
VkPipelineLayout pipeline_layout;
VkPipelineLayoutCreateInfo plci = {};
plci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
plci.pNext = NULL;
plci.setLayoutCount = 1;
plci.pSetLayouts = &bad_layout;
vkCreatePipelineLayout(device(), &plci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, WriteDescriptorSetIntegrityCheck) {
TEST_DESCRIPTION(
"This test verifies some requirements of chapter 13.2.3 of the Vulkan Spec "
"1) A uniform buffer update must have a valid buffer index. "
"2) When using an array of descriptors in a single WriteDescriptor, the descriptor types and stageflags "
"must all be the same. "
"3) Immutable Sampler state must match across descriptors");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00324");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorPoolSize ds_type_count[4] = {};
ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count[0].descriptorCount = 1;
ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count[1].descriptorCount = 1;
ds_type_count[2].type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count[2].descriptorCount = 1;
ds_type_count[3].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
ds_type_count[3].descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = sizeof(ds_type_count) / sizeof(VkDescriptorPoolSize);
ds_pool_ci.pPoolSizes = ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dslb1 = {};
dslb1.binding = 0;
dslb1.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dslb1.descriptorCount = 1;
dslb1.stageFlags = VK_SHADER_STAGE_ALL;
dslb1.pImmutableSamplers = NULL;
VkDescriptorSetLayoutBinding dslb2 = {};
dslb2.binding = 1;
dslb2.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dslb2.descriptorCount = 1;
dslb2.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb2.pImmutableSamplers = NULL;
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dslb3 = {};
dslb3.binding = 2;
dslb3.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dslb3.descriptorCount = 1;
dslb3.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb3.pImmutableSamplers = static_cast<VkSampler *>(&sampler);
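// Binding 2 uses an immutable sampler while binding 1 does not; case 3 below exercises this mismatch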
const std::vector<VkDescriptorSetLayoutBinding> layout_bindings = {dslb1, dslb2, dslb3};
const VkDescriptorSetLayoutObj ds_layout(m_device, layout_bindings);
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
VkDescriptorSet descriptorSet;
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
// 1) The uniform buffer is intentionally invalid here
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// Create a buffer to update the descriptor with
uint32_t qfi = 0;
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffCI.queueFamilyIndexCount = 1;
buffCI.pQueueFamilyIndices = &qfi;
VkBuffer dyub;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
ASSERT_VK_SUCCESS(err);
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs);
VkMemoryAllocateInfo mem_alloc_info = {};
mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc_info.allocationSize = mem_reqs.size;
m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), dyub, mem, 0);
ASSERT_VK_SUCCESS(err);
VkDescriptorBufferInfo buffInfo[2] = {};
buffInfo[0].buffer = dyub;
buffInfo[0].offset = 0;
buffInfo[0].range = 1024;
buffInfo[1].buffer = dyub;
buffInfo[1].offset = 0;
buffInfo[1].range = 1024;
descriptor_write.pBufferInfo = buffInfo;
descriptor_write.descriptorCount = 2;
// 2) The stageFlags and descriptorType don't match between the first and second descriptor
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// 3) The second descriptor has null pImmutableSamplers and
// the third descriptor contains an immutable sampler
descriptor_write.dstBinding = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
// Make pImageInfo non-null to avoid complaints about it missing
VkDescriptorImageInfo imageInfo = {};
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
descriptor_write.pImageInfo = &imageInfo;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), dyub, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroySampler(m_device->device(), sampler, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, WriteDescriptorSetConsecutiveUpdates) {
TEST_DESCRIPTION(
"Verifies that updates rolling over to next descriptor work correctly by destroying buffer from consecutive update known "
"to be used in descriptor set and verifying that error is flagged.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
uint32_t qfi = 0;
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
bci.size = 2048;
bci.queueFamilyIndexCount = 1;
bci.pQueueFamilyIndices = &qfi;
VkBufferObj buffer0;
buffer0.init(*m_device, bci);
VkPipelineObj pipe(m_device);
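// Declared outside the scope below so only buffer1 is destroyed before the command buffer is submitted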
{ // Scope 2nd buffer to cause early destruction
VkBufferObj buffer1;
bci.size = 1024;
buffer1.init(*m_device, bci);
VkDescriptorBufferInfo buffer_info[3] = {};
buffer_info[0].buffer = buffer0.handle();
buffer_info[0].offset = 0;
buffer_info[0].range = 1024;
buffer_info[1].buffer = buffer0.handle();
buffer_info[1].offset = 1024;
buffer_info[1].range = 1024;
buffer_info[2].buffer = buffer1.handle();
buffer_info[2].offset = 0;
buffer_info[2].range = 1024;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_; // descriptor_set;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 3;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.pBufferInfo = buffer_info;
// Update descriptor
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO that uses the uniform buffers
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
"layout(set=0) layout(binding=1) uniform blah { int x; } duh;\n"
"void main(){\n"
" x = vec4(duh.x, bar.y, bar.x, 1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
VkResult err = pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&ds.set_, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
}
// buffer1 just went out of scope and was destroyed along with its memory
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer ");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DeviceMemory ");
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineLayoutExceedsSetLimit) {
TEST_DESCRIPTION("Attempt to create a pipeline layout using more than the physical limit of SetLayouts.");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorSetLayoutBinding layout_binding = {};
layout_binding.binding = 0;
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
layout_binding.descriptorCount = 1;
layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
layout_binding.pImmutableSamplers = NULL;
VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &layout_binding;
VkDescriptorSetLayout ds_layout = {};
VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
// Create an array of DSLs, one larger than the physical limit
const auto excess_layouts = 1 + m_device->phy().properties().limits.maxBoundDescriptorSets;
std::vector<VkDescriptorSetLayout> dsl_array(excess_layouts, ds_layout);
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pNext = NULL;
pipeline_layout_ci.setLayoutCount = excess_layouts;
pipeline_layout_ci.pSetLayouts = dsl_array.data();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286");
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
// Clean up
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}
TEST_F(VkLayerTest, CreatePipelineLayoutExcessPerStageDescriptors) {
TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed per-stage limits");
ASSERT_NO_FATAL_FAILURE(Init());
uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers;
uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers;
uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages;
uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages;
uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers;
uint32_t max_combined = std::min(max_samplers, max_sampled_images);
uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments;
uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic;
uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers;
uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic;
uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers;
uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages;
uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages;
uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers;
uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments;
// Devices that report UINT32_MAX for any of these limits can't run this test
if (UINT32_MAX == std::max({max_uniform_buffers, max_storage_buffers, max_sampled_images, max_storage_images, max_samplers})) {
printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix);
return;
}
VkDescriptorSetLayoutBinding dslb = {};
std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {};
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
ds_layout_ci.pNext = NULL;
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pNext = NULL;
pipeline_layout_ci.setLayoutCount = 1;
pipeline_layout_ci.pSetLayouts = &ds_layout;
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
// VU 0fe0023e - too many sampler type descriptors in fragment stage
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dslb.descriptorCount = max_samplers;
dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dslb.descriptorCount = max_combined;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287");
if ((max_samplers + max_combined) > sum_samplers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too
}
if (max_combined > sum_sampled_images) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00240 - too many uniform buffer type descriptors in vertex stage
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dslb.descriptorCount = max_uniform_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288");
if (dslb.descriptorCount > sum_uniform_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"); // expect all-stages sum too
}
if (dslb.descriptorCount > sum_dyn_uniform_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00242 - too many storage buffer type descriptors in compute stage
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
dslb.descriptorCount = max_storage_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_ALL;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
dslb_vec.push_back(dslb);
dslb.binding = 2;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289");
if (dslb.descriptorCount > sum_dyn_storage_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"); // expect all-stages sum too
}
if (dslb_vec[0].descriptorCount + dslb_vec[2].descriptorCount > sum_storage_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00244 - too many sampled image type descriptors in multiple stages
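// Combined image samplers and uniform texel buffers also count against the sampled image limits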
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dslb.descriptorCount = max_sampled_images;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
dslb_vec.push_back(dslb);
dslb.binding = 2;
dslb.descriptorCount = max_combined;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290");
if (max_combined + 2 * max_sampled_images > sum_sampled_images) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // expect all-stages sum too
}
if (max_combined > sum_samplers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00246 - too many storage image type descriptors in fragment stage
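// Storage texel buffers count against the storage image limits; both bindings are visible to the fragment stage, giving 2 * (1 + max/2) descriptors there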
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
dslb.descriptorCount = 1 + (max_storage_images / 2);
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291");
if (2 * dslb.descriptorCount > sum_storage_images) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d18 - too many input attachments in fragment stage
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
dslb.descriptorCount = 1 + max_input_attachments;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676");
if (dslb.descriptorCount > sum_input_attachments) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684"); // expect all-stages sum too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}
TEST_F(VkLayerTest, CreatePipelineLayoutExcessDescriptorsOverall) {
TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed limits");
ASSERT_NO_FATAL_FAILURE(Init());
uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers;
uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers;
uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages;
uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages;
uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers;
uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments;
uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic;
uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers;
uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic;
uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers;
uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages;
uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages;
uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers;
uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments;
// Devices that report UINT32_MAX for any of these limits can't run this test
if (UINT32_MAX == std::max({sum_dyn_uniform_buffers, sum_uniform_buffers, sum_dyn_storage_buffers, sum_storage_buffers,
sum_sampled_images, sum_storage_images, sum_samplers, sum_input_attachments})) {
printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix);
return;
}
VkDescriptorSetLayoutBinding dslb = {};
std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {};
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
ds_layout_ci.pNext = NULL;
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pNext = NULL;
pipeline_layout_ci.setLayoutCount = 1;
pipeline_layout_ci.pSetLayouts = &ds_layout;
VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
// VU 0fe00d1a - too many sampler type descriptors overall
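// Split the sampler budget across two bindings: sum_samplers / 2 samplers plus (sum_samplers - sum_samplers / 2 + 1) combined image samplers exceeds the overall limit by one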
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dslb.descriptorCount = sum_samplers / 2;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dslb.descriptorCount = sum_samplers - dslb.descriptorCount + 1;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677");
if (dslb.descriptorCount > max_samplers) {
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"); // Expect max-per-stage samplers exceeds limits
}
if (dslb.descriptorCount > sum_sampled_images) {
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"); // Expect max overall sampled image count exceeds limits
}
if (dslb.descriptorCount > max_sampled_images) {
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max per-stage sampled image count exceeds limits
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d1c - too many uniform buffer type descriptors overall
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dslb.descriptorCount = sum_uniform_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678");
if (dslb.descriptorCount > max_uniform_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d1e - too many dynamic uniform buffer type descriptors overall
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dslb.descriptorCount = sum_dyn_uniform_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679");
if (dslb.descriptorCount > max_uniform_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d20 - too many storage buffer type descriptors overall
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
dslb.descriptorCount = sum_storage_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680");
if (dslb.descriptorCount > max_storage_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d22 - too many dynamic storage buffer type descriptors overall
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
dslb.descriptorCount = sum_dyn_storage_buffers + 1;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681");
if (dslb.descriptorCount > max_storage_buffers) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d24 - too many sampled image type descriptors overall
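// Note: COMBINED_IMAGE_SAMPLER, SAMPLED_IMAGE, and UNIFORM_TEXEL_BUFFER descriptors all count toward the
// maxDescriptorSetSampledImages limit, so the over-limit total is split across three bindings and stages below.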
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dslb.descriptorCount = max_samplers;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
// revisit: not robust to odd limits.
uint32_t remaining = (max_samplers > sum_sampled_images ? 0 : (sum_sampled_images - max_samplers) / 2);
dslb.descriptorCount = 1 + remaining;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
dslb.binding = 2;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
dslb_vec.push_back(dslb);
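// Binding 2 reuses descriptorCount from binding 1, so bindings 1 and 2 together add 2 * (1 + remaining)
// descriptors that count as sampled images, which is intended to push the set total past sum_sampled_images.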
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682");
if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > max_sampled_images) {
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"); // Expect max-per-stage sampled images to exceed limits
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d26 - too many storage image type descriptors overall
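// Note: STORAGE_IMAGE and STORAGE_TEXEL_BUFFER descriptors both count toward the maxDescriptorSetStorageImages
// limit, so the overflow is split across the two bindings below.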
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
dslb.descriptorCount = sum_storage_images / 2;
dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
dslb.binding = 1;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
dslb.descriptorCount = sum_storage_images - dslb.descriptorCount + 1;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683");
if (dslb.descriptorCount > max_storage_images) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
// VU 0fe00d28 - too many input attachment type descriptors overall
dslb_vec.clear();
dslb.binding = 0;
dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
dslb.descriptorCount = sum_input_attachments + 1;
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dslb.pImmutableSamplers = NULL;
dslb_vec.push_back(dslb);
ds_layout_ci.bindingCount = dslb_vec.size();
ds_layout_ci.pBindings = dslb_vec.data();
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684");
if (dslb.descriptorCount > max_input_attachments) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676"); // expect max-per-stage too
}
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed
pipeline_layout = VK_NULL_HANDLE;
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}
TEST_F(VkLayerTest, InvalidCmdBufferBufferDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a buffer dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buf_info.size = 256;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = mem_reqs.size;
bool pass = false;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
vkCmdFillBuffer(m_commandBuffer->handle(), buffer, 0, VK_WHOLE_SIZE, 0);
m_commandBuffer->end();
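// Recording vkCmdFillBuffer above makes this command buffer depend on the buffer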
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer ");
// Destroy buffer dependency prior to submit to cause ERROR
vkDestroyBuffer(m_device->device(), buffer, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
vkFreeMemory(m_device->handle(), mem, NULL);
}
TEST_F(VkLayerTest, InvalidCmdBufferBufferViewDestroyed) {
TEST_DESCRIPTION("Delete bufferView bound to cmd buffer, then attempt to submit cmd buffer.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count;
ds_type_count.type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding layout_binding;
layout_binding.binding = 0;
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
layout_binding.descriptorCount = 1;
layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
layout_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {layout_binding});
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
VkDescriptorSet descriptor_set;
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
VkBuffer buffer;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory buffer_memory;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
VkBufferView view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.buffer = buffer;
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view);
ASSERT_VK_SUCCESS(err);
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptor_set;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor_write.pTexelBufferView = &view;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
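// The storage texel buffer descriptor now references the buffer view; once this set is bound for a draw,
// the command buffer depends on the view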
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = imageLoad(s, 0);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound BufferView ");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// Bind pipeline to cmd buffer - this causes a crash on Mali

vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_set, 0, nullptr);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Delete BufferView in order to invalidate cmd buffer
vkDestroyBufferView(m_device->device(), view, NULL);
// Now attempt submit of cmd buffer
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Clean-up
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), buffer_memory, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidCmdBufferImageDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an image dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkImage image;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
image_create_info.flags = 0;
VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
// Memory must be bound to the image before recording a command that uses it
VkMemoryRequirements mem_reqs;
VkDeviceMemory image_mem;
bool pass;
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, image_mem, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
VkClearColorValue ccv;
ccv.float32[0] = 1.0f;
ccv.float32[1] = 1.0f;
ccv.float32[2] = 1.0f;
ccv.float32[3] = 1.0f;
VkImageSubresourceRange isr = {};
isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
isr.baseArrayLayer = 0;
isr.baseMipLevel = 0;
isr.layerCount = 1;
isr.levelCount = 1;
vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image ");
// Destroy image dependency prior to submit to cause ERROR
vkDestroyImage(m_device->device(), image, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), image_mem, nullptr);
}
TEST_F(VkLayerTest, InvalidCmdBufferFramebufferImageDestroyed) {
TEST_DESCRIPTION(
"Attempt to draw with a command buffer that is invalid due to a framebuffer image dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
printf("%s Image format doesn't support required features.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageCreateInfo image_ci = {};
image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_ci.pNext = NULL;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
image_ci.extent.width = 32;
image_ci.extent.height = 32;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = 1;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_ci.flags = 0;
VkImage image;
ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image));
VkMemoryRequirements memory_reqs;
VkDeviceMemory image_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
ASSERT_VK_SUCCESS(err);
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image,
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_B8G8R8A8_UNORM,
{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
m_renderPassBeginInfo.renderArea.extent.width = 32;
m_renderPassBeginInfo.renderArea.extent.height = 32;
// Record an empty (no-op) cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
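// Beginning and ending the render pass with fb is enough to make the command buffer reference the
// framebuffer's attachment image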
// Destroy image attached to framebuffer to invalidate cmd buffer
vkDestroyImage(m_device->device(), image, NULL);
// Now attempt to submit cmd buffer and verify error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image ");
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyImageView(m_device->device(), view, nullptr);
vkFreeMemory(m_device->device(), image_memory, nullptr);
}
TEST_F(VkLayerTest, FramebufferInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(256, 256, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Record an empty (no-op) cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put it in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
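// No fence is provided, so the submission is still considered in-flight until vkQueueWaitIdle below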
// Destroy framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyFramebuffer-framebuffer-00892");
vkDestroyFramebuffer(m_device->device(), fb, NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy everything
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If framebuffer is not VK_NULL_HANDLE, framebuffer must be a valid VkFramebuffer handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Framebuffer obj");
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
}
TEST_F(VkLayerTest, FramebufferImageInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use image that's child of framebuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
VkResult err = VK_SUCCESS;
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_B8G8R8A8_UNORM, &format_properties);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageCreateInfo image_ci = {};
image_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_ci.pNext = NULL;
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.format = VK_FORMAT_B8G8R8A8_UNORM;
image_ci.extent.width = 256;
image_ci.extent.height = 256;
image_ci.extent.depth = 1;
image_ci.mipLevels = 1;
image_ci.arrayLayers = 1;
image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_ci.flags = 0;
VkImage image;
ASSERT_VK_SUCCESS(vkCreateImage(m_device->handle(), &image_ci, NULL, &image));
VkMemoryRequirements memory_reqs;
VkDeviceMemory image_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
ASSERT_VK_SUCCESS(err);
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image,
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_B8G8R8A8_UNORM,
{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, m_renderPass, 1, &view, 256, 256, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Just use default renderpass with our framebuffer
m_renderPassBeginInfo.framebuffer = fb;
// Record an empty (no-op) cmd buffer for submit
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put the framebuffer and its children (including the attached image view) in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy image attached to framebuffer while in-flight
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImage-image-01000");
vkDestroyImage(m_device->device(), image, NULL);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy image and other objects
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If image is not VK_NULL_HANDLE, image must be a valid VkImage handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Image obj");
vkDestroyImage(m_device->device(), image, NULL);
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyImageView(m_device->device(), view, nullptr);
vkFreeMemory(m_device->device(), image_memory, nullptr);
}
TEST_F(VkLayerTest, RenderPassInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use renderPass.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create simple renderpass
VkAttachmentReference attach = {};
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &attach;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
VkRenderPassBeginInfo rpbi = {};
rpbi.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rpbi.framebuffer = m_framebuffer;
rpbi.renderPass = rp;
m_commandBuffer->BeginRenderPass(rpbi);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
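// The pending command buffer keeps rp in use, so destroying it before the queue idles should be flagged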
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyRenderPass-renderPass-00873");
vkDestroyRenderPass(m_device->device(), rp, nullptr);
m_errorMonitor->VerifyFound();
// Wait for queue to complete so we can safely destroy rp
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If renderPass is not VK_NULL_HANDLE, renderPass must be a valid VkRenderPass handle");
m_errorMonitor->SetUnexpectedError("Was it created? Has it already been destroyed?");
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, ImageMemoryNotBound) {
TEST_DESCRIPTION("Attempt to draw with an image which has not had memory bound to it.");
ASSERT_NO_FATAL_FAILURE(Init());
VkImage image;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
image_create_info.flags = 0;
VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
// Allocate memory for the image, but deliberately skip binding it to trigger the error below
VkMemoryRequirements mem_reqs;
VkDeviceMemory image_mem;
bool pass;
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
mem_alloc.allocationSize = mem_reqs.size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &image_mem);
ASSERT_VK_SUCCESS(err);
// Introduce error, do not call vkBindImageMemory(m_device->device(), image, image_mem, 0);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindImageMemory().");
m_commandBuffer->begin();
VkClearColorValue ccv;
ccv.float32[0] = 1.0f;
ccv.float32[1] = 1.0f;
ccv.float32[2] = 1.0f;
ccv.float32[3] = 1.0f;
VkImageSubresourceRange isr = {};
isr.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
isr.baseArrayLayer = 0;
isr.baseMipLevel = 0;
isr.layerCount = 1;
isr.levelCount = 1;
vkCmdClearColorImage(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, &ccv, 1, &isr);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image, NULL);
vkFreeMemory(m_device->device(), image_mem, nullptr);
}
TEST_F(VkLayerTest, BufferMemoryNotBound) {
TEST_DESCRIPTION("Attempt to copy from a buffer which has not had memory bound to it.");
ASSERT_NO_FATAL_FAILURE(Init());
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkBuffer buffer;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
buf_info.size = 1024;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = 1024;
bool pass = false;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
// Introduce failure by not calling vkBindBufferMemory(m_device->device(), buffer, mem, 0);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindBufferMemory().");
VkBufferImageCopy region = {};
region.bufferRowLength = 16;
region.bufferImageHeight = 16;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.height = 4;
region.imageExtent.width = 4;
region.imageExtent.depth = 1;
m_commandBuffer->begin();
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->handle(), mem, NULL);
}
TEST_F(VkLayerTest, InvalidCmdBufferEventDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to an event dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vkCreateEvent(m_device->device(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Event ");
// Destroy event dependency prior to submit to cause ERROR
vkDestroyEvent(m_device->device(), event, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCmdBufferQueryPoolDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a query pool dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
VkQueryPool query_pool;
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_TIMESTAMP;
qpci.queryCount = 1;
VkResult result = vkCreateQueryPool(m_device->device(), &qpci, nullptr, &query_pool);
ASSERT_VK_SUCCESS(result);
m_commandBuffer->begin();
vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound QueryPool ");
// Destroy query pool dependency prior to submit to cause ERROR
vkDestroyQueryPool(m_device->device(), query_pool, NULL);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCmdBufferPipelineDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a pipeline dependency being destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
{
// Use helper to create graphics pipeline
CreatePipelineHelper helper(*this);
helper.InitInfo();
helper.InitState();
helper.CreateGraphicsPipeline();
// Bind helper pipeline to command buffer
m_commandBuffer->begin();
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
m_commandBuffer->end();
// pipeline will be destroyed when helper goes out of scope
}
// Cause error by submitting command buffer that references destroyed pipeline
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Pipeline ");
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, DestroyPipelineRenderPass) {
TEST_DESCRIPTION("Draw using a pipeline whose create renderPass has been destroyed.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkResult err;
// Create a renderPass that's compatible with Draw-time renderPass
VkAttachmentDescription att = {};
att.format = m_render_target_fmt;
att.samples = VK_SAMPLE_COUNT_1_BIT;
att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentReference ref = {};
ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
ref.attachment = 0;
m_renderPassClearValues.clear();
VkClearValue clear = {};
clear.color = m_clear_color;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.flags = 0;
subpass.inputAttachmentCount = 0;
subpass.pInputAttachments = NULL;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &ref;
subpass.pResolveAttachments = NULL;
subpass.pDepthStencilAttachment = NULL;
subpass.preserveAttachmentCount = 0;
subpass.pPreserveAttachments = NULL;
VkRenderPassCreateInfo rp_info = {};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = 1;
rp_info.pAttachments = &att;
rp_info.subpassCount = 1;
rp_info.pSubpasses = &subpass;
VkRenderPass rp;
err = vkCreateRenderPass(device(), &rp_info, NULL, &rp);
ASSERT_VK_SUCCESS(err);
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_viewports.push_back(viewport);
pipe.SetViewport(m_viewports);
VkRect2D rect = {{0, 0}, {64, 64}};
m_scissors.push_back(rect);
pipe.SetScissor(m_scissors);
const VkPipelineLayoutObj pl(m_device);
pipe.CreateVKPipeline(pl.handle(), rp);
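// The pipeline only needs a renderPass compatible with the one used at draw time; the rp handle itself
// should not need to outlive pipeline creation, which is what this positive test exercises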
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// Destroy the renderPass before the pipeline is used in the Draw.
// We delay until after CmdBindPipeline to verify that no invalid binding is
// created between the CB & renderPass, as was done previously.
vkDestroyRenderPass(m_device->device(), rp, nullptr);
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vkQueueWaitIdle(m_device->m_queue);
}
TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetBufferDestroyed) {
TEST_DESCRIPTION(
"Attempt to draw with a command buffer that is invalid due to a bound descriptor set with a buffer dependency being "
"destroyed.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create a buffer to update the descriptor with
uint32_t qfi = 0;
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffCI.queueFamilyIndexCount = 1;
buffCI.pQueueFamilyIndices = &qfi;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
// Allocate memory and bind it to the buffer so the test reaches the intended error
VkMemoryRequirements memReqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = memReqs.size;
mem_alloc.memoryTypeIndex = 0;
bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to set memory type.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
// Correctly update descriptor to avoid "NOT_UPDATED" error
VkDescriptorBufferInfo buffInfo = {};
buffInfo.buffer = buffer;
buffInfo.offset = 0;
buffInfo.range = 1024;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.pBufferInfo = &buffInfo;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
"void main(){\n"
" x = vec4(bar.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &m_viewports[0]);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &m_scissors[0]);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Buffer ");
// Destroying the buffer should invalidate the cmd buffer, causing an error on submit
vkDestroyBuffer(m_device->device(), buffer, NULL);
// Attempt to submit cmd buffer
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Cleanup
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidCmdBufferDescriptorSetImageSamplerDestroyed) {
TEST_DESCRIPTION(
"Attempt to draw with a command buffer that is invalid due to a bound descriptor sets with a combined image sampler having "
"their image, sampler, and descriptor set each respectively destroyed and then attempting to submit associated cmd "
"buffers. Attempt to destroy a DescriptorSet that is in use.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
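// This test walks through several invalidation cases in order: a destroyed image view, a destroyed sampler,
// a destroyed image, and finally a descriptor set freed while still referenced by a command buffer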
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create images to update the descriptor with
VkImage image;
VkImage image2;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image2);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory image_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
// Allocate enough memory for both images
memory_info.allocationSize = memory_reqs.size * 2;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
ASSERT_VK_SUCCESS(err);
// Bind second image to memory right after first image
err = vkBindImageMemory(m_device->device(), image2, image_memory, memory_reqs.size);
ASSERT_VK_SUCCESS(err);
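// image2 shares image's create info (and therefore its memory requirements), so it is bound immediately
// after image within the same allocation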
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image;
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageView tmp_view; // First test deletes this view
VkImageView view;
VkImageView view2;
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &tmp_view);
ASSERT_VK_SUCCESS(err);
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
ASSERT_VK_SUCCESS(err);
image_view_create_info.image = image2;
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view2);
ASSERT_VK_SUCCESS(err);
// Create Samplers
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkSampler sampler2;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler2);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = tmp_view;
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
// Record and submit a valid command buffer first; the first error case (destroying the image view) follows below
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// This first submit should be successful
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkQueueWaitIdle(m_device->m_queue);
// Now destroy imageview and reset cmdBuffer
vkDestroyImageView(m_device->device(), tmp_view, NULL);
m_commandBuffer->reset(0);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that has been destroyed.");
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Re-update descriptor with new view
img_info.imageView = view;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Now test destroying sampler prior to cmd buffer submission
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Destroying the sampler invalidates the cmd buffer, causing an error on submit
vkDestroySampler(m_device->device(), sampler, NULL);
// Attempt to submit cmd buffer
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound Sampler");
submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Now re-update descriptor with valid sampler and delete image
img_info.sampler = sampler2;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
VkCommandBufferBeginInfo info = {};
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound Image ");
m_commandBuffer->begin(&info);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Destroying the image invalidates the cmd buffer, causing an error on submit
vkDestroyImage(m_device->device(), image, NULL);
// Attempt to submit cmd buffer
submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Now update descriptor to be valid, but then free the descriptor set
img_info.imageView = view2;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_commandBuffer->begin(&info);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Immediately try to free the descriptor set referenced by the in-flight command buffer - failure expected
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call vkFreeDescriptorSets() on descriptor set 0x");
vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet);
m_errorMonitor->VerifyFound();
// Try again once the queue is idle - should succeed w/o error
// TODO - though the particular error above doesn't re-occur, there are other 'unexpecteds' still to clean up
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError(
"pDescriptorSets must be a valid pointer to an array of descriptorSetCount VkDescriptorSet handles, each element of which "
"must either be a valid handle or VK_NULL_HANDLE");
m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorSet obj");
vkFreeDescriptorSets(m_device->device(), ds_pool, 1, &descriptorSet);
// Attempt to submit cmd buffer containing the freed descriptor set
submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " that is invalid because bound DescriptorSet ");
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
// Cleanup
vkFreeMemory(m_device->device(), image_memory, NULL);
vkDestroySampler(m_device->device(), sampler2, NULL);
vkDestroyImage(m_device->device(), image2, NULL);
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroyImageView(m_device->device(), view2, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidDescriptorSetSamplerDestroyed) {
TEST_DESCRIPTION("Attempt to draw with a bound descriptor sets with a combined image sampler where sampler has been deleted.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
// Create images to update the descriptor with
VkImageObj image(m_device);
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image.handle();
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageView view;
VkResult err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
ASSERT_VK_SUCCESS(err);
// Create Samplers
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = view;
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Destroy the sampler before it's bound to the cmd buffer
vkDestroySampler(m_device->device(), sampler, NULL);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
// First error case is destroying sampler prior to cmd buffer submission
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" Descriptor in binding #0 at global descriptor index 0 is using sampler ");
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkDestroyImageView(m_device->device(), view, NULL);
}
TEST_F(VkLayerTest, ImageDescriptorLayoutMismatch) {
TEST_DESCRIPTION("Update an image sampler with a layout that doesn't match the actual image layout at the image is used.");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create images to update the descriptor with
const VkFormat format = VK_FORMAT_B8G8R8A8_UNORM;
VkImageObj image(m_device);
// TRANSFER_SRC usage is required for the TRANSFER_SRC_OPTIMAL transition recorded below
image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image.handle();
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = format;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageView view;
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
ASSERT_VK_SUCCESS(err);
// Create Sampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = view;
// This layout is recorded in the descriptor, but the image will actually be in TRANSFER_SRC_OPTIMAL at use time, causing a mismatch
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
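// Record the layout transition and the draw into a dedicated command buffer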
VkCommandBufferObj cmd_buf(m_device, m_commandPool);
cmd_buf.begin();
// Transition the image to TRANSFER_SRC_OPTIMAL, which differs from the SHADER_READ_ONLY_OPTIMAL layout recorded in the descriptor
image.SetLayout(&cmd_buf, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
cmd_buf.BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(cmd_buf.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &descriptorSet, 0,
NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport);
vkCmdSetScissor(cmd_buf.handle(), 0, 1, &scissor);
// At draw time the update layout will mis-match the actual layout
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageLayout-00344");
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
" Image layout specified at vkUpdateDescriptorSets() time doesn't match actual image layout at time descriptor is used.");
cmd_buf.Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
cmd_buf.EndRenderPass();
cmd_buf.end();
// Submit cmd buffer
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmd_buf.handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkQueueWaitIdle(m_device->m_queue);
// Cleanup
vkDestroySampler(m_device->device(), sampler, NULL);
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, DescriptorPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete a DescriptorPool with a DescriptorSet that is in use.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptor_set;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create image to update the descriptor with
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Create Sampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = view;
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptor_set;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
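// Record a draw that uses the descriptor set; submitting it puts the pool's set in flight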
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_set, 0, NULL);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put pool in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Destroy pool while in-flight, causing error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyDescriptorPool-descriptorPool-00303");
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Cleanup
vkDestroySampler(m_device->device(), sampler, NULL);
m_errorMonitor->SetUnexpectedError(
"If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj");
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
// TODO : It seems the validation layers think ds_pool was already destroyed, even though it wasn't.
}
TEST_F(VkLayerTest, DescriptorPoolInUseResetSignaled) {
TEST_DESCRIPTION("Reset a DescriptorPool with a DescriptorSet that is in use.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = nullptr;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, nullptr, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = nullptr;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptor_set;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create image to update the descriptor with
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
// Create Sampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = view;
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptor_set;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, nullptr);
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
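// Record a draw that uses the descriptor set so the pool is in use when the reset is attempted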
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_set, 0, nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer to put pool in-flight
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
// Reset pool while in-flight, causing error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetDescriptorPool-descriptorPool-00313");
vkResetDescriptorPool(m_device->device(), ds_pool, 0);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Cleanup
vkDestroySampler(m_device->device(), sampler, nullptr);
m_errorMonitor->SetUnexpectedError(
"If descriptorPool is not VK_NULL_HANDLE, descriptorPool must be a valid VkDescriptorPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove DescriptorPool obj");
vkDestroyDescriptorPool(m_device->device(), ds_pool, nullptr);
}
TEST_F(VkLayerTest, DescriptorImageUpdateNoMemoryBound) {
TEST_DESCRIPTION("Attempt an image descriptor set update where image's bound memory has been freed.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
VkResult err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create images to update the descriptor with
VkImage image;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
// Initially bind memory to avoid error at bind view time. We'll break binding before update.
VkMemoryRequirements memory_reqs;
VkDeviceMemory image_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
// Allocate enough memory for image
memory_info.allocationSize = memory_reqs.size;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &image_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image, image_memory, 0);
ASSERT_VK_SUCCESS(err);
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image;
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageView view;
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
ASSERT_VK_SUCCESS(err);
// Create Samplers
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
// Update descriptor with image and sampler
VkDescriptorImageInfo img_info = {};
img_info.sampler = sampler;
img_info.imageView = view;
img_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &img_info;
// Break memory binding and attempt update
vkFreeMemory(m_device->device(), image_memory, nullptr);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" previously bound memory was freed. Memory must not be freed prior to this operation.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// Cleanup
vkDestroyImage(m_device->device(), image, NULL);
vkDestroySampler(m_device->device(), sampler, NULL);
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidPipeline) {
// Attempt to bind an invalid Pipeline to a valid Command Buffer
// ObjectTracker should catch this.
// Create a valid cmd buffer
// Call vkCmdBindPipeline with a bogus pipeline handle
uint64_t fake_pipeline_handle = 0xbaad6001;
VkPipeline bad_pipeline = reinterpret_cast<VkPipeline &>(fake_pipeline_handle);
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipeline-parameter");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, bad_pipeline);
m_errorMonitor->VerifyFound();
// Now issue a draw call with no pipeline bound
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "At Draw/Dispatch time no valid VkPipeline is bound!");
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
// Finally, the same check once more, but with a compute dispatch
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "At Draw/Dispatch time no valid VkPipeline is bound!");
vkCmdEndRenderPass(m_commandBuffer->handle()); // must be outside renderpass
vkCmdDispatch(m_commandBuffer->handle(), 0, 0, 0);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CmdDispatchExceedLimits) {
TEST_DESCRIPTION("Compute dispatch with dimensions that exceed device limits");
// Enable KHX device group extensions, if available
if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
bool khx_dg_ext_available = false;
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME);
khx_dg_ext_available = true;
}
ASSERT_NO_FATAL_FAILURE(InitState());
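// Per-dimension compute workgroup count limits; each dispatch below exceeds exactly one of them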
uint32_t x_limit = m_device->props.limits.maxComputeWorkGroupCount[0];
uint32_t y_limit = m_device->props.limits.maxComputeWorkGroupCount[1];
uint32_t z_limit = m_device->props.limits.maxComputeWorkGroupCount[2];
if (std::max({x_limit, y_limit, z_limit}) == UINT32_MAX) {
printf("%s device maxComputeWorkGroupCount limit reports UINT32_MAX, test not possible, skipping.\n", kSkipPrefix);
return;
}
// Create a minimal compute pipeline
std::string cs_text = "#version 450\nvoid main() {}\n"; // minimal no-op shader
VkShaderObj cs_obj(m_device, cs_text.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, this);
VkPipelineLayoutCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
info.pNext = nullptr;
VkPipelineLayout pipe_layout;
vkCreatePipelineLayout(device(), &info, nullptr, &pipe_layout);
VkComputePipelineCreateInfo pipeline_info = {};
pipeline_info.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
pipeline_info.pNext = nullptr;
pipeline_info.flags = khx_dg_ext_available ? VK_PIPELINE_CREATE_DISPATCH_BASE_KHR : 0;
pipeline_info.layout = pipe_layout;
pipeline_info.basePipelineHandle = VK_NULL_HANDLE;
pipeline_info.basePipelineIndex = -1;
pipeline_info.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
pipeline_info.stage.pNext = nullptr;
pipeline_info.stage.flags = 0;
pipeline_info.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
pipeline_info.stage.module = cs_obj.handle();
pipeline_info.stage.pName = "main";
pipeline_info.stage.pSpecializationInfo = nullptr;
VkPipeline cs_pipeline;
vkCreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &cs_pipeline);
// Bind pipeline to command buffer
m_commandBuffer->begin();
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline);
// Dispatch counts that exceed device limits
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountX-00386");
vkCmdDispatch(m_commandBuffer->handle(), x_limit + 1, y_limit, z_limit);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountY-00387");
vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit + 1, z_limit);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatch-groupCountZ-00388");
vkCmdDispatch(m_commandBuffer->handle(), x_limit, y_limit, z_limit + 1);
m_errorMonitor->VerifyFound();
if (khx_dg_ext_available) {
PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR =
(PFN_vkCmdDispatchBaseKHR)vkGetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR");
// Base equals or exceeds limit
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00421");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit, y_limit - 1, z_limit - 1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupX-00422");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit, z_limit - 1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-baseGroupZ-00423");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_limit - 1, y_limit - 1, z_limit, 0, 0, 0);
m_errorMonitor->VerifyFound();
// (Base + count) exceeds limit
uint32_t x_base = x_limit / 2;
uint32_t y_base = y_limit / 2;
uint32_t z_base = z_limit / 2;
x_limit -= x_base;
y_limit -= y_base;
z_limit -= z_base;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountX-00424");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit + 1, y_limit, z_limit);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountY-00425");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit + 1, z_limit);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDispatchBase-groupCountZ-00426");
fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_limit, y_limit, z_limit + 1);
m_errorMonitor->VerifyFound();
} else {
printf("%s KHX_DEVICE_GROUP_* extensions not supported, skipping CmdDispatchBaseKHR() tests.\n", kSkipPrefix);
}
// Clean up
vkDestroyPipeline(device(), cs_pipeline, nullptr);
vkDestroyPipelineLayout(device(), pipe_layout, nullptr);
}
TEST_F(VkLayerTest, MultiplaneImageLayoutBadAspectFlags) {
TEST_DESCRIPTION("Query layout of a multiplane image using illegal aspect flag masks");
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
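// Common create-info for the 2-plane and 3-plane test images; only the format is changed below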
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_LINEAR;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify formats
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR;
supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
if (!supported) {
printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
VkImage image_2plane, image_3plane;
ci.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR;
VkResult err = vkCreateImage(device(), &ci, NULL, &image_2plane);
ASSERT_VK_SUCCESS(err);
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR;
err = vkCreateImage(device(), &ci, NULL, &image_3plane);
ASSERT_VK_SUCCESS(err);
// Query layout of 3rd plane, for a 2-plane image
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
subres.mipLevel = 0;
subres.arrayLayer = 0;
VkSubresourceLayout layout = {};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01581");
vkGetImageSubresourceLayout(device(), image_2plane, &subres, &layout);
m_errorMonitor->VerifyFound();
// Query layout using color aspect, for a 3-plane image
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-format-01582");
vkGetImageSubresourceLayout(device(), image_3plane, &subres, &layout);
m_errorMonitor->VerifyFound();
// Clean up
vkDestroyImage(device(), image_2plane, NULL);
vkDestroyImage(device(), image_3plane, NULL);
}
TEST_F(VkPositiveLayerTest, MultiplaneGetImageSubresourceLayout) {
TEST_DESCRIPTION("Positive test, query layout of a single plane of a multiplane image. (repro Github #2530)");
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
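// 3-plane YCbCr image whose individual plane layouts are queried below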
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_LINEAR;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify format
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
if (!supported) {
printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
VkImage image;
VkResult err = vkCreateImage(device(), &ci, NULL, &image);
ASSERT_VK_SUCCESS(err);
// Query layout of 3rd plane
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
subres.mipLevel = 0;
subres.arrayLayer = 0;
VkSubresourceLayout layout = {};
m_errorMonitor->ExpectSuccess();
vkGetImageSubresourceLayout(device(), image, &subres, &layout);
m_errorMonitor->VerifyNotFound();
vkDestroyImage(device(), image, NULL);
}
TEST_F(VkLayerTest, DescriptorSetNotUpdated) {
TEST_DESCRIPTION("Bind a descriptor set that hasn't been updated.");
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, " bound but it was never updated. ");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
// We shouldn't need a fragment shader but add it to be able to run
// on more devices
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidBufferViewObject) {
// Create a single TEXEL_BUFFER descriptor and send it an invalid bufferView
// First, cause the bufferView to be invalid by destroying the underlying buffer
// Then destroy the view itself and verify that the same error is hit
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
// Create a valid bufferView to start with
VkBuffer buffer;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
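// Bind memory to the buffer so the buffer view can be created successfully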
VkMemoryRequirements memory_reqs;
VkDeviceMemory buffer_memory;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
VkBufferView view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.buffer = buffer;
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view);
ASSERT_VK_SUCCESS(err);
// First destroy the buffer underlying the view, which should hit the error in core validation
vkDestroyBuffer(m_device->device(), buffer, NULL);
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
descriptor_write.pTexelBufferView = &view;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// Now destroy the view itself and verify the same error, which is hit in parameter validation this time
vkDestroyBufferView(m_device->device(), view, NULL);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00323");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), buffer_memory, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, CreateBufferViewNoMemoryBoundToBuffer) {
TEST_DESCRIPTION("Attempt to create a buffer view with a buffer that has no memory bound to it.");
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindBufferMemory().");
ASSERT_NO_FATAL_FAILURE(Init());
// Create a buffer with no bound memory and then attempt to create
// a buffer view.
VkBufferCreateInfo buff_ci = {};
buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
buff_ci.size = 256;
buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkBufferViewCreateInfo buff_view_ci = {};
buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
buff_view_ci.buffer = buffer;
buff_view_ci.format = VK_FORMAT_R8_UNORM;
buff_view_ci.range = VK_WHOLE_SIZE;
VkBufferView buff_view;
err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buff_view);
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), buffer, NULL);
// If last error is success, it still created the view, so delete it.
if (err == VK_SUCCESS) {
vkDestroyBufferView(m_device->device(), buff_view, NULL);
}
}
TEST_F(VkLayerTest, InvalidDynamicOffsetCases) {
// Create a descriptorSet w/ dynamic descriptor and then hit 3 offset error
// cases:
// 1. No dynamicOffset supplied
// 2. Too many dynamicOffsets supplied
// 3. Dynamic offset oversteps buffer being updated
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" requires 1 dynamicOffsets, but only 0 dynamicOffsets are left in pDynamicOffsets ");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
// Create a buffer to update the descriptor with
uint32_t qfi = 0;
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffCI.queueFamilyIndexCount = 1;
buffCI.pQueueFamilyIndices = &qfi;
VkBuffer dyub;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
ASSERT_VK_SUCCESS(err);
// Allocate memory and bind it to the buffer so the test can reach the intended error
VkMemoryRequirements memReqs;
vkGetBufferMemoryRequirements(m_device->device(), dyub, &memReqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = memReqs.size;
mem_alloc.memoryTypeIndex = 0;
bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), dyub, NULL);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), dyub, mem, 0);
ASSERT_VK_SUCCESS(err);
// Correctly update descriptor to avoid "NOT_UPDATED" error
VkDescriptorBufferInfo buffInfo = {};
buffInfo.buffer = dyub;
buffInfo.offset = 0;
buffInfo.range = 1024;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
descriptor_write.pBufferInfo = &buffInfo;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 0, NULL);
m_errorMonitor->VerifyFound();
uint32_t pDynOff[2] = {512, 756};
// Now cause error b/c too many dynOffsets in array for # of dyn descriptors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Attempting to bind 1 descriptorSets with 1 dynamic descriptors, but ");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 2, pDynOff);
m_errorMonitor->VerifyFound();
// Finally cause error due to dynamicOffset being too big
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
" dynamic offset 512 combined with offset 0 and range 1024 that oversteps the buffer size of 1024");
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
"void main(){\n"
" x = vec4(bar.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// This bind should succeed, but the dynamic offset of 512 combined with range 1024
// oversteps the 1024-byte buffer at draw time
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptorSet, 1, pDynOff);
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkDestroyBuffer(m_device->device(), dyub, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, DescriptorBufferUpdateNoMemoryBound) {
TEST_DESCRIPTION("Attempt to update a descriptor with a non-sparse buffer that doesn't have memory bound");
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindBufferMemory().");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkUpdateDescriptorSets() failed write update validation for Descriptor Set 0x");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
// Create a buffer to update the descriptor with
uint32_t qfi = 0;
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffCI.queueFamilyIndexCount = 1;
buffCI.pQueueFamilyIndices = &qfi;
VkBuffer dyub;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
ASSERT_VK_SUCCESS(err);
// Attempt to update descriptor without binding memory to it
VkDescriptorBufferInfo buffInfo = {};
buffInfo.buffer = dyub;
buffInfo.offset = 0;
buffInfo.range = 1024;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = descriptorSet;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
descriptor_write.pBufferInfo = &buffInfo;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), dyub, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidPushConstants) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineLayout pipeline_layout;
VkPushConstantRange pc_range = {};
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pushConstantRangeCount = 1;
pipeline_layout_ci.pPushConstantRanges = &pc_range;
//
// Check for invalid push constant ranges in pipeline layouts.
//
struct PipelineLayoutTestCase {
VkPushConstantRange const range;
char const *msg;
};
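// A value 4 bytes beyond the device's maxPushConstantsSize limit, used to provoke offset/size errors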
const uint32_t too_big = m_device->props.limits.maxPushConstantsSize + 0x4;
const std::array<PipelineLayoutTestCase, 10> range_tests = {{
{{VK_SHADER_STAGE_VERTEX_BIT, 0, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."},
{{VK_SHADER_STAGE_VERTEX_BIT, 0, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."},
{{VK_SHADER_STAGE_VERTEX_BIT, 4, 1}, "vkCreatePipelineLayout() call has push constants index 0 with size 1."},
{{VK_SHADER_STAGE_VERTEX_BIT, 4, 0}, "vkCreatePipelineLayout() call has push constants index 0 with size 0."},
{{VK_SHADER_STAGE_VERTEX_BIT, 1, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset 1. Offset must"},
{{VK_SHADER_STAGE_VERTEX_BIT, 0, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
{{VK_SHADER_STAGE_VERTEX_BIT, too_big, too_big}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
{{VK_SHADER_STAGE_VERTEX_BIT, too_big, 4}, "vkCreatePipelineLayout() call has push constants index 0 with offset "},
{{VK_SHADER_STAGE_VERTEX_BIT, 0xFFFFFFF0, 0x00000020},
"vkCreatePipelineLayout() call has push constants index 0 with offset "},
{{VK_SHADER_STAGE_VERTEX_BIT, 0x00000020, 0xFFFFFFF0},
"vkCreatePipelineLayout() call has push constants index 0 with offset "},
}};
// Check for invalid offset and size
for (const auto &iter : range_tests) {
pc_range = iter.range;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg);
vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
}
// Check for invalid stage flag
pc_range.offset = 0;
pc_range.size = 16;
pc_range.stageFlags = 0;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreatePipelineLayout: value of pCreateInfo->pPushConstantRanges[0].stageFlags must not be 0");
vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
// Check for duplicate stage flags in a list of push constant ranges.
// A shader can only have one push constant block and that block is mapped
// to the push constant range that has that shader's stage flag set.
// The shader's stage flag can only appear once in all the ranges, so the
// implementation can find the one and only range to map it to.
const uint32_t ranges_per_test = 5;
struct DuplicateStageFlagsTestCase {
VkPushConstantRange const ranges[ranges_per_test];
std::vector<char const *> const msg;
};
// Overlapping ranges are OK, but a stage flag can appear only once.
const std::array<DuplicateStageFlagsTestCase, 3> duplicate_stageFlags_tests = {
{
{{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4}},
{
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 1.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 2.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 4.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 2.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 3.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 4.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 3 and 4.",
}},
{{{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4},
{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}},
{
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 0 and 3.",
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 1 and 4.",
}},
{{{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 4},
{VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_VERTEX_BIT, 0, 4},
{VK_SHADER_STAGE_GEOMETRY_BIT, 0, 4}},
{
"vkCreatePipelineLayout() Duplicate stage flags found in ranges 2 and 3.",
}},
},
};
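// Every pair of ranges sharing a stage produces one message: the first case above has all
// five ranges on the vertex stage, hence C(5,2) = 10 expected messages; the later cases
// repeat only some of their stages.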
for (const auto &iter : duplicate_stageFlags_tests) {
pipeline_layout_ci.pPushConstantRanges = iter.ranges;
pipeline_layout_ci.pushConstantRangeCount = ranges_per_test;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, iter.msg.begin(), iter.msg.end());
vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
}
//
// CmdPushConstants tests
//
// Setup a pipeline layout with ranges: [0,32) [16,80)
const std::vector<VkPushConstantRange> pc_range2 = {{VK_SHADER_STAGE_VERTEX_BIT, 16, 64},
{VK_SHADER_STAGE_FRAGMENT_BIT, 0, 32}};
const VkPipelineLayoutObj pipeline_layout_obj(m_device, {}, pc_range2);
const uint8_t dummy_values[100] = {};
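// Scratch data for vkCmdPushConstants; 100 bytes comfortably covers the largest
// offset + size pushed below (offset 80 + size 4).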
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Check for invalid stage flag
// Note that VU 00996 isn't reached due to parameter validation
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdPushConstants: value of stageFlags must not be 0");
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), 0, 0, 16, dummy_values);
m_errorMonitor->VerifyFound();
// Positive tests for the overlapping ranges
m_errorMonitor->ExpectSuccess();
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, dummy_values);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 32, 48, dummy_values);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(),
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 16, 16, dummy_values);
m_errorMonitor->VerifyNotFound();
// Wrong cmd stages for extant range
// No range for all cmd stages -- "VUID-vkCmdPushConstants-offset-01795"
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795");
// Missing cmd stages for found overlapping range -- "VUID-vkCmdPushConstants-offset-01796"
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796");
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_GEOMETRY_BIT, 0, 16, dummy_values);
m_errorMonitor->VerifyFound();
// No extant range covers the requested offset
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795");
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_FRAGMENT_BIT, 80, 4, dummy_values);
m_errorMonitor->VerifyFound();
// Overlapping extent not fully covered: vertex-stage bytes [0,16) fall outside the vertex range [16,80)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01795");
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(),
VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 20, dummy_values);
m_errorMonitor->VerifyFound();
// Wrong stage flags for valid overlapping range
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushConstants-offset-01796");
vkCmdPushConstants(m_commandBuffer->handle(), pipeline_layout_obj.handle(), VK_SHADER_STAGE_VERTEX_BIT, 16, 16, dummy_values);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, DescriptorSetCompatibility) {
// Test various descriptorSet errors with bad binding combinations
using std::vector;
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
static const uint32_t NUM_DESCRIPTOR_TYPES = 5;
VkDescriptorPoolSize ds_type_count[NUM_DESCRIPTOR_TYPES] = {};
ds_type_count[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count[0].descriptorCount = 10;
ds_type_count[1].type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
ds_type_count[1].descriptorCount = 2;
ds_type_count[2].type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
ds_type_count[2].descriptorCount = 2;
ds_type_count[3].type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count[3].descriptorCount = 5;
// TODO : LunarG ILO driver currently asserts in desc.c w/ INPUT_ATTACHMENT type
// ds_type_count[4].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
ds_type_count[4].type = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
ds_type_count[4].descriptorCount = 2;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 5;
ds_pool_ci.poolSizeCount = NUM_DESCRIPTOR_TYPES;
ds_pool_ci.pPoolSizes = ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
static const uint32_t MAX_DS_TYPES_IN_LAYOUT = 2;
VkDescriptorSetLayoutBinding dsl_binding[MAX_DS_TYPES_IN_LAYOUT] = {};
dsl_binding[0].binding = 0;
dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding[0].descriptorCount = 5;
dsl_binding[0].stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding[0].pImmutableSamplers = NULL;
// Create layout identical to set0 layout but w/ different stageFlags
VkDescriptorSetLayoutBinding dsl_fs_stage_only = {};
dsl_fs_stage_only.binding = 0;
dsl_fs_stage_only.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_fs_stage_only.descriptorCount = 5;
dsl_fs_stage_only.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; // Different stageFlags to cause error at
// bind time
dsl_fs_stage_only.pImmutableSamplers = NULL;
vector<VkDescriptorSetLayoutObj> ds_layouts;
// Create 4 unique layouts for full pipelineLayout, and 1 special fs-only
// layout for error case
ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));
const VkDescriptorSetLayoutObj ds_layout_fs_only(m_device, {dsl_fs_stage_only});
dsl_binding[0].binding = 0;
dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dsl_binding[0].descriptorCount = 2;
dsl_binding[1].binding = 1;
dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
dsl_binding[1].descriptorCount = 2;
dsl_binding[1].stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding[1].pImmutableSamplers = NULL;
ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>({dsl_binding[0], dsl_binding[1]}));
dsl_binding[0].binding = 0;
dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dsl_binding[0].descriptorCount = 5;
ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));
dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
dsl_binding[0].descriptorCount = 2;
ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding[0]));
const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts);
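// MakeVkHandles gathers the raw VkDescriptorSetLayout handles from the layout objects for
// use in the allocate info below.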
static const uint32_t NUM_SETS = 4;
VkDescriptorSet descriptorSet[NUM_SETS] = {};
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorPool = ds_pool;
alloc_info.descriptorSetCount = ds_vk_layouts.size();
alloc_info.pSetLayouts = ds_vk_layouts.data();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, descriptorSet);
ASSERT_VK_SUCCESS(err);
VkDescriptorSet ds0_fs_only = {};
alloc_info.descriptorSetCount = 1;
alloc_info.pSetLayouts = &ds_layout_fs_only.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &ds0_fs_only);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layouts[0], &ds_layouts[1]});
// Create pipelineLayout with only one setLayout
const VkPipelineLayoutObj single_pipe_layout(m_device, {&ds_layouts[0]});
// Create pipelineLayout with 2 descriptor setLayout at index 0
const VkPipelineLayoutObj pipe_layout_one_desc(m_device, {&ds_layouts[3]});
// Create pipelineLayout with 5 SAMPLER descriptor setLayout at index 0
const VkPipelineLayoutObj pipe_layout_five_samp(m_device, {&ds_layouts[2]});
// Create pipelineLayout with UB type, but stageFlags for FS only
VkPipelineLayoutObj pipe_layout_fs_only(m_device, {&ds_layout_fs_only});
// Create pipelineLayout w/ incompatible set0 layout, but set1 is fine
const VkPipelineLayoutObj pipe_layout_bad_set0(m_device, {&ds_layout_fs_only, &ds_layouts[1]});
// Create PSO to be used for draw-time errors below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
"void main(){\n"
" x = vec4(bar.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipe_layout_fs_only.handle(), renderPass());
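// The pipeline is created against the fs-only layout; the sets allocated from the
// VK_SHADER_STAGE_ALL layouts are then deliberately incompatible with it at bind/draw time.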
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// TODO : Want to cause various binding incompatibility issues here to test
// DrawState
// First cause various verify_layout_compatibility() fails
// Second disturb early and late sets and verify INFO msgs
// verify_set_layout_compatibility fail cases:
// 1. invalid VkPipelineLayout (layout) passed into vkCmdBindDescriptorSets
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindDescriptorSets-layout-parameter");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, (VkPipelineLayout)((size_t)0xbaadb1be), 0,
1, &descriptorSet[0], 0, NULL);
m_errorMonitor->VerifyFound();
// 2. layoutIndex exceeds # of layouts in layout
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " attempting to bind set to index 1");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, single_pipe_layout.handle(), 0, 2,
&descriptorSet[0], 0, NULL);
m_errorMonitor->VerifyFound();
// 3. Pipeline setLayout[0] has 2 descriptors, but set being bound has 5
// descriptors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " has 2 descriptors, but DescriptorSetLayout ");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_one_desc.handle(), 0, 1,
&descriptorSet[0], 0, NULL);
m_errorMonitor->VerifyFound();
// 4. same # of descriptors but mismatch in type
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " is type 'VK_DESCRIPTOR_TYPE_SAMPLER' but binding ");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_five_samp.handle(), 0, 1,
&descriptorSet[0], 0, NULL);
m_errorMonitor->VerifyFound();
// 5. same # of descriptors but mismatch in stageFlags
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" has stageFlags 16 but binding 0 for DescriptorSetLayout ");
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_fs_only.handle(), 0, 1,
&descriptorSet[0], 0, NULL);
m_errorMonitor->VerifyFound();
// Now that we're done actively using the pipelineLayout that gfx pipeline
// was created with, we should be able to delete it. Do that now to verify
// that validation obeys pipelineLayout lifetime
pipe_layout_fs_only.Reset();
// Cause draw-time errors due to PSO incompatibilities
// 1. Error due to not binding required set (we reuse the same binding calls as above to disturb set0)
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2,
&descriptorSet[0], 0, NULL);
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe_layout_bad_set0.handle(), 1, 1,
&descriptorSet[1], 0, NULL);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " uses set #0 but that set is not bound.");
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
// 2. Error due to bound set not being compatible with PSO's
// VkPipelineLayout (diff stageFlags in this case)
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 2,
&descriptorSet[0], 0, NULL);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " bound as set #0 is not compatible with ");
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
// Remaining clean-up
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, NoBeginCommandBuffer) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"You must call vkBeginCommandBuffer() before this call to ");
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
// Call EndCommandBuffer() w/o calling BeginCommandBuffer()
vkEndCommandBuffer(commandBuffer.handle());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SecondaryCommandBufferNullRenderpass) {
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandBufferObj cb(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
// Force the failure by not setting the Renderpass and Framebuffer fields
VkCommandBufferInheritanceInfo cmd_buf_hinfo = {};
cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
VkCommandBufferBeginInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = NULL;
cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkCommandBufferBeginInfo-flags-00053");
vkBeginCommandBuffer(cb.handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedExplicitReset) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded");
// A pool we can reset in.
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
secondary.begin();
secondary.end();
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
// rerecording of secondary
secondary.reset(); // explicit reset here.
secondary.begin();
secondary.end();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SecondaryCommandBufferRerecordedNoReset) {
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "was destroyed or rerecorded");
// A pool we can reset in.
VkCommandPoolObj pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj secondary(m_device, &pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
secondary.begin();
secondary.end();
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
// rerecording of secondary
secondary.begin(); // implicit reset in begin
secondary.end();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CascadedInvalidation) {
ASSERT_NO_FATAL_FAILURE(Init());
VkEventCreateInfo eci = {VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0};
VkEvent event;
vkCreateEvent(m_device->device(), &eci, nullptr, &event);
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
secondary.begin();
vkCmdSetEvent(secondary.handle(), event, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT);
secondary.end();
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_commandBuffer->end();
// destroying the event should invalidate both primary and secondary CB
vkDestroyEvent(m_device->device(), event, nullptr);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "invalid because bound Event");
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CommandBufferResetErrors) {
// Cause error due to Begin while recording CB
// Then cause 2 errors for attempting to reset CB w/o having
// VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT set for the pool from
// which CBs were allocated. Note that this bit is off by default.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Cannot call Begin on command buffer");
ASSERT_NO_FATAL_FAILURE(Init());
// Calls AllocateCommandBuffers
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
// Minimal inheritance info (ignored for primary command buffers) to complete the begin info
VkCommandBufferInheritanceInfo cmd_buf_hinfo = {};
cmd_buf_hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
VkCommandBufferBeginInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cmd_buf_info.pNext = NULL;
cmd_buf_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmd_buf_info.pInheritanceInfo = &cmd_buf_hinfo;
// Begin CB to transition to recording state
vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info);
// Can't re-begin. This should trigger error
vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkResetCommandBuffer-commandBuffer-00046");
VkCommandBufferResetFlags flags = 0; // Don't care about flags for this test
// Reset attempt will trigger error due to incorrect CommandPool state
vkResetCommandBuffer(commandBuffer.handle(), flags);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBeginCommandBuffer-commandBuffer-00050");
// Transition CB to RECORDED state
vkEndCommandBuffer(commandBuffer.handle());
// Now attempting to Begin will implicitly reset, which triggers error
vkBeginCommandBuffer(commandBuffer.handle(), &cmd_buf_info);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidPipelineCreateState) {
// Attempt to Create Gfx Pipeline w/o a VS
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Invalid Pipeline CreateInfo State: Vertex Shader required");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
VkPipelineRasterizationStateCreateInfo rs_state_ci = {};
rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_state_ci.polygonMode = VK_POLYGON_MODE_FILL;
rs_state_ci.cullMode = VK_CULL_MODE_BACK_BIT;
rs_state_ci.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rs_state_ci.depthClampEnable = VK_FALSE;
rs_state_ci.rasterizerDiscardEnable = VK_TRUE;
rs_state_ci.depthBiasEnable = VK_FALSE;
rs_state_ci.lineWidth = 1.0f;
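// With rasterizerDiscardEnable set, the viewport, multisample, depth/stencil, and color
// blend state may be omitted, keeping this create info minimal; the only intended error
// here is the missing vertex shader.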
VkPipelineVertexInputStateCreateInfo vi_ci = {};
vi_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vi_ci.pNext = nullptr;
vi_ci.vertexBindingDescriptionCount = 0;
vi_ci.pVertexBindingDescriptions = nullptr;
vi_ci.vertexAttributeDescriptionCount = 0;
vi_ci.pVertexAttributeDescriptions = nullptr;
VkPipelineInputAssemblyStateCreateInfo ia_ci = {};
ia_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkPipelineShaderStageCreateInfo shaderStages[2];
memset(&shaderStages, 0, 2 * sizeof(VkPipelineShaderStageCreateInfo));
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
shaderStages[0] = fs.GetStageCreateInfo(); // should be: vs.GetStageCreateInfo();
shaderStages[1] = fs.GetStageCreateInfo();
VkGraphicsPipelineCreateInfo gp_ci = {};
gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
gp_ci.pViewportState = nullptr; // no viewport b/c rasterizer is disabled
gp_ci.pRasterizationState = &rs_state_ci;
gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
gp_ci.layout = pipeline_layout.handle();
gp_ci.renderPass = renderPass();
gp_ci.pVertexInputState = &vi_ci;
gp_ci.pInputAssemblyState = &ia_ci;
gp_ci.stageCount = 1;
gp_ci.pStages = shaderStages;
VkPipelineCacheCreateInfo pc_ci = {};
pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
pc_ci.initialDataSize = 0;
pc_ci.pInitialData = 0;
VkPipeline pipeline;
VkPipelineCache pipelineCache;
err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache);
ASSERT_VK_SUCCESS(err);
err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline);
m_errorMonitor->VerifyFound();
vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable) {
// Enable sample shading in pipeline when the feature is disabled.
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Disable sampleRateShading here
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
device_features.sampleRateShading = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Cause the error by enabling sample shading...
auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; };
CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784");
}
TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureEnable) {
// Check minSampleShading bounds when sample shading is enabled and the feature is available.
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Require sampleRateShading here
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
if (device_features.sampleRateShading == VK_FALSE) {
printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
auto range_test = [this](float value, bool positive_test) {
auto info_override = [value](CreatePipelineHelper &helper) {
helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE;
helper.pipe_ms_state_ci_.minSampleShading = value;
};
CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786", positive_test);
};
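// NearestSmaller/NearestGreater are test helpers that presumably step to the adjacent
// representable float, so these calls probe values just outside and exactly on the valid
// [0.0, 1.0] range for minSampleShading.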
range_test(NearestSmaller(0.0F), false);
range_test(NearestGreater(1.0F), false);
range_test(0.0F, /* positive_test= */ true);
range_test(1.0F, /* positive_test= */ true);
}
TEST_F(VkLayerTest, InvalidPipelineSamplePNext) {
// Validate the pNext chain of VkPipelineMultisampleStateCreateInfo with and without the relevant extensions.
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Set up the extension structs
auto sampleLocations = chain_util::Init<VkPipelineSampleLocationsStateCreateInfoEXT>();
auto coverageToColor = chain_util::Init<VkPipelineCoverageToColorStateCreateInfoNV>();
auto coverageModulation = chain_util::Init<VkPipelineCoverageModulationStateCreateInfoNV>();
auto discriminatrix = [this](const char *name) { return DeviceExtensionSupported(gpu(), nullptr, name); };
chain_util::ExtensionChain chain(discriminatrix, &m_device_extension_names);
chain.Add(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, sampleLocations);
chain.Add(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME, coverageToColor);
chain.Add(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, coverageModulation);
const void *extension_head = chain.Head();
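// The ExtensionChain helper presumably only links a struct (and enables its extension)
// when DeviceExtensionSupported reports it, so extension_head stays null if none of the
// three extensions are present.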
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (extension_head) {
auto good_chain = [extension_head](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = extension_head; };
CreatePipelineHelper::OneshotTest(*this, good_chain, (VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT),
"No error", true);
} else {
printf("%s Required extension not present -- skipping positive checks.\n", kSkipPrefix);
}
auto instance_ci = chain_util::Init<VkInstanceCreateInfo>();
auto bad_chain = [&instance_ci](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = &instance_ci; };
CreatePipelineHelper::OneshotTest(*this, bad_chain, VK_DEBUG_REPORT_WARNING_BIT_EXT,
"VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext");
}
/*// TODO : This test should be good, but needs Tess support in compiler to run
TEST_F(VkLayerTest, InvalidPatchControlPoints)
{
// Attempt to Create Gfx Pipeline w/o a VS
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH
primitive ");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(),
VK_DESCRIPTOR_POOL_USAGE_NON_FREE, 1, &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
ds_layout_ci.sType =
VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
ds_layout_ci.pNext = NULL;
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &dsl_binding;
VkDescriptorSetLayout ds_layout;
err = vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL,
&ds_layout);
ASSERT_VK_SUCCESS(err);
VkDescriptorSet descriptorSet;
err = vkAllocateDescriptorSets(m_device->device(), ds_pool,
VK_DESCRIPTOR_SET_USAGE_NON_FREE, 1, &ds_layout, &descriptorSet);
ASSERT_VK_SUCCESS(err);
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType =
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pNext = NULL;
pipeline_layout_ci.setLayoutCount = 1;
pipeline_layout_ci.pSetLayouts = &ds_layout;
VkPipelineLayout pipeline_layout;
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL,
&pipeline_layout);
ASSERT_VK_SUCCESS(err);
VkPipelineShaderStageCreateInfo shaderStages[3];
memset(&shaderStages, 0, 3 * sizeof(VkPipelineShaderStageCreateInfo));
VkShaderObj vs(m_device,bindStateVertShaderText,VK_SHADER_STAGE_VERTEX_BIT,
this);
// Just using VS txt for Tess shaders as we don't care about functionality
VkShaderObj
tc(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
this);
VkShaderObj
te(m_device,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
this);
shaderStages[0].sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT;
shaderStages[0].shader = vs.handle();
shaderStages[1].sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[1].stage = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
shaderStages[1].shader = tc.handle();
shaderStages[2].sType =
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
shaderStages[2].stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
shaderStages[2].shader = te.handle();
VkPipelineInputAssemblyStateCreateInfo iaCI = {};
iaCI.sType =
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
iaCI.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
VkPipelineTessellationStateCreateInfo tsCI = {};
tsCI.sType = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO;
tsCI.patchControlPoints = 0; // This will cause an error
VkGraphicsPipelineCreateInfo gp_ci = {};
gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
gp_ci.pNext = NULL;
gp_ci.stageCount = 3;
gp_ci.pStages = shaderStages;
gp_ci.pVertexInputState = NULL;
gp_ci.pInputAssemblyState = &iaCI;
gp_ci.pTessellationState = &tsCI;
gp_ci.pViewportState = NULL;
gp_ci.pRasterizationState = NULL;
gp_ci.pMultisampleState = NULL;
gp_ci.pDepthStencilState = NULL;
gp_ci.pColorBlendState = NULL;
gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
gp_ci.layout = pipeline_layout;
gp_ci.renderPass = renderPass();
VkPipelineCacheCreateInfo pc_ci = {};
pc_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
pc_ci.pNext = NULL;
pc_ci.initialSize = 0;
pc_ci.initialData = 0;
pc_ci.maxSize = 0;
VkPipeline pipeline;
VkPipelineCache pipelineCache;
err = vkCreatePipelineCache(m_device->device(), &pc_ci, NULL,
&pipelineCache);
ASSERT_VK_SUCCESS(err);
err = vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1,
&gp_ci, NULL, &pipeline);
m_errorMonitor->VerifyFound();
vkDestroyPipelineCache(m_device->device(), pipelineCache, NULL);
vkDestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);
vkDestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
*/
TEST_F(VkLayerTest, PSOViewportStateTests) {
TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for non-multiViewport");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
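// Initializing with a zeroed features struct disables multiViewport, so viewportCount and
// scissorCount must each be exactly 1 for the non-dynamic cases below.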
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const auto break_vp_state = [](CreatePipelineHelper &helper) {
helper.rs_state_ci_.rasterizerDiscardEnable = VK_FALSE;
helper.gp_ci_.pViewportState = nullptr;
};
CreatePipelineHelper::OneshotTest(*this, break_vp_state, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750");
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkViewport viewports[] = {viewport, viewport};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkRect2D scissors[] = {scissor, scissor};
// test viewport and scissor arrays
using std::vector;
struct TestCase {
uint32_t viewport_count;
VkViewport *viewports;
uint32_t scissor_count;
VkRect2D *scissors;
vector<std::string> vuids;
};
vector<TestCase> test_cases = {
{0,
viewports,
1,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
1,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{1,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{1,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{0,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
{2,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
{0,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{1, nullptr, 1, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}},
{1, viewports, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}},
{1,
nullptr,
1,
nullptr,
{"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}},
{2,
nullptr,
3,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747",
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}},
{0,
nullptr,
0,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
};
for (const auto &test_case : test_cases) {
const auto break_vp = [&test_case](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = test_case.viewport_count;
helper.vp_state_ci_.pViewports = test_case.viewports;
helper.vp_state_ci_.scissorCount = test_case.scissor_count;
helper.vp_state_ci_.pScissors = test_case.scissors;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids);
}
vector<TestCase> dyn_test_cases = {
{0,
viewports,
1,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
1,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{1,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{1,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{0,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
{2,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
{0,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
nullptr,
3,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{0,
nullptr,
0,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}},
};
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
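// With viewport and scissor made dynamic, the pViewports/pScissors pointers are ignored,
// so the null-pointer VUIDs (00747/00748) drop out of the expected messages above.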
for (const auto &test_case : dyn_test_cases) {
const auto break_vp = [&](CreatePipelineHelper &helper) {
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
helper.dyn_state_ci_ = dyn_state_ci;
helper.vp_state_ci_.viewportCount = test_case.viewport_count;
helper.vp_state_ci_.pViewports = test_case.viewports;
helper.vp_state_ci_.scissorCount = test_case.scissor_count;
helper.vp_state_ci_.pScissors = test_case.scissors;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids);
}
}
// Set Extension dynamic states without enabling the required Extensions.
TEST_F(VkLayerTest, ExtensionDynamicStatesSetWOExtensionEnabled) {
TEST_DESCRIPTION("Create a graphics pipeline with Extension dynamic states without enabling the required Extensions.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
using std::vector;
struct TestCase {
uint32_t dynamic_state_count;
VkDynamicState dynamic_state;
char const *errmsg;
};
vector<TestCase> dyn_test_cases = {
{1, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV,
"contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but VK_NV_clip_space_w_scaling"},
{1, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT,
"contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but VK_EXT_discard_rectangles"},
{1, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, "contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but VK_EXT_sample_locations"},
};
for (const auto &test_case : dyn_test_cases) {
VkDynamicState state[1];
state[0] = test_case.dynamic_state;
const auto break_vp = [&](CreatePipelineHelper &helper) {
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = test_case.dynamic_state_count;
dyn_state_ci.pDynamicStates = state;
helper.dyn_state_ci_ = dyn_state_ci;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.errmsg);
}
}
TEST_F(VkLayerTest, PSOViewportStateMultiViewportTests) {
TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for multiViewport feature");
ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features
if (!m_device->phy().features().multiViewport) {
printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix);
return;
}
// at least 16 viewports supported from here on
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkViewport viewports[] = {viewport, viewport};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkRect2D scissors[] = {scissor, scissor};
using std::vector;
struct TestCase {
uint32_t viewport_count;
VkViewport *viewports;
uint32_t scissor_count;
VkRect2D *scissors;
vector<std::string> vuids;
};
vector<TestCase> test_cases = {
{0,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{0,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}},
{2, nullptr, 2, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}},
{2, viewports, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}},
{2,
nullptr,
2,
nullptr,
{"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}},
{0,
nullptr,
0,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}},
};
const auto max_viewports = m_device->phy().properties().limits.maxViewports;
const bool max_viewports_maxxed = max_viewports == std::numeric_limits<decltype(max_viewports)>::max();
if (max_viewports_maxxed) {
printf("%s VkPhysicalDeviceLimits::maxViewports is UINT32_MAX -- skipping part of test requiring to exceed maxViewports.\n",
kSkipPrefix);
} else {
const auto too_much_viewports = max_viewports + 1;
// avoid potentially big allocations by using only nullptr
test_cases.push_back({too_much_viewports,
nullptr,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220",
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}});
test_cases.push_back({2,
viewports,
too_much_viewports,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220",
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}});
test_cases.push_back(
{too_much_viewports,
nullptr,
too_much_viewports,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747",
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}});
}
for (const auto &test_case : test_cases) {
const auto break_vp = [&test_case](CreatePipelineHelper &helper) {
helper.vp_state_ci_.viewportCount = test_case.viewport_count;
helper.vp_state_ci_.pViewports = test_case.viewports;
helper.vp_state_ci_.scissorCount = test_case.scissor_count;
helper.vp_state_ci_.pScissors = test_case.scissors;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids);
}
vector<TestCase> dyn_test_cases = {
{0,
viewports,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{2,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}},
{0,
viewports,
0,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}},
{0,
nullptr,
0,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}},
};
if (!max_viewports_maxxed) {
const auto too_much_viewports = max_viewports + 1;
// avoid potentially big allocations by using only nullptr
dyn_test_cases.push_back({too_much_viewports,
nullptr,
2,
scissors,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}});
dyn_test_cases.push_back({2,
viewports,
too_much_viewports,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}});
dyn_test_cases.push_back({too_much_viewports,
nullptr,
too_much_viewports,
nullptr,
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218",
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219"}});
}
const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
for (const auto &test_case : dyn_test_cases) {
const auto break_vp = [&](CreatePipelineHelper &helper) {
VkPipelineDynamicStateCreateInfo dyn_state_ci = {};
dyn_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dyn_state_ci.dynamicStateCount = size(dyn_states);
dyn_state_ci.pDynamicStates = dyn_states;
helper.dyn_state_ci_ = dyn_state_ci;
helper.vp_state_ci_.viewportCount = test_case.viewport_count;
helper.vp_state_ci_.pViewports = test_case.viewports;
helper.vp_state_ci_.scissorCount = test_case.scissor_count;
helper.vp_state_ci_.pScissors = test_case.scissors;
};
CreatePipelineHelper::OneshotTest(*this, break_vp, VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuids);
}
}
TEST_F(VkLayerTest, DynViewportAndScissorUndefinedDrawState) {
TEST_DESCRIPTION("Test viewport and scissor dynamic state that is not set before draw");
ASSERT_NO_FATAL_FAILURE(Init());
// TODO: should also test on !multiViewport
if (!m_device->phy().features().multiViewport) {
printf("%s Device does not support multiple viewports/scissors; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
const VkPipelineLayoutObj pipeline_layout(m_device);
VkPipelineObj pipeline_dyn_vp(m_device);
pipeline_dyn_vp.AddShader(&vs);
pipeline_dyn_vp.AddShader(&fs);
pipeline_dyn_vp.AddDefaultColorAttachment();
pipeline_dyn_vp.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT);
pipeline_dyn_vp.SetScissor(m_scissors);
ASSERT_VK_SUCCESS(pipeline_dyn_vp.CreateVKPipeline(pipeline_layout.handle(), m_renderPass));
VkPipelineObj pipeline_dyn_sc(m_device);
pipeline_dyn_sc.AddShader(&vs);
pipeline_dyn_sc.AddShader(&fs);
pipeline_dyn_sc.AddDefaultColorAttachment();
pipeline_dyn_sc.SetViewport(m_viewports);
pipeline_dyn_sc.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR);
ASSERT_VK_SUCCESS(pipeline_dyn_sc.CreateVKPipeline(pipeline_layout.handle(), m_renderPass));
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Dynamic viewport(s) 0 are used by pipeline state object, ");
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_vp.handle());
vkCmdSetViewport(m_commandBuffer->handle(), 1, 1,
&m_viewports[0]); // Forgetting to set needed 0th viewport (PSO viewportCount == 1)
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Dynamic scissor(s) 0 are used by pipeline state object, ");
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_sc.handle());
vkCmdSetScissor(m_commandBuffer->handle(), 1, 1,
&m_scissors[0]); // Forgetting to set needed 0th scissor (PSO scissorCount == 1)
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, PSOLineWidthInvalid) {
TEST_DESCRIPTION("Test non-1.0 lineWidth errors when pipeline is created and in vkCmdSetLineWidth");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo shader_state_cis[] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
VkPipelineVertexInputStateCreateInfo vi_state_ci = {};
vi_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
VkPipelineInputAssemblyStateCreateInfo ia_state_ci = {};
ia_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
ia_state_ci.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkPipelineViewportStateCreateInfo vp_state_ci = {};
vp_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp_state_ci.viewportCount = 1;
vp_state_ci.pViewports = &viewport;
vp_state_ci.scissorCount = 1;
vp_state_ci.pScissors = &scissor;
VkPipelineRasterizationStateCreateInfo rs_state_ci = {};
rs_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_state_ci.rasterizerDiscardEnable = VK_FALSE;
// lineWidth is set per test case below
VkPipelineMultisampleStateCreateInfo ms_state_ci = {};
ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT; // must match subpass att.
VkPipelineColorBlendAttachmentState cba_state = {};
VkPipelineColorBlendStateCreateInfo cb_state_ci = {};
cb_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
cb_state_ci.attachmentCount = 1; // must match count in subpass
cb_state_ci.pAttachments = &cba_state;
const VkPipelineLayoutObj pipeline_layout(m_device);
VkGraphicsPipelineCreateInfo gp_ci = {};
gp_ci.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
gp_ci.stageCount = sizeof(shader_state_cis) / sizeof(VkPipelineShaderStageCreateInfo);
gp_ci.pStages = shader_state_cis;
gp_ci.pVertexInputState = &vi_state_ci;
gp_ci.pInputAssemblyState = &ia_state_ci;
gp_ci.pViewportState = &vp_state_ci;
gp_ci.pRasterizationState = &rs_state_ci;
gp_ci.pMultisampleState = &ms_state_ci;
gp_ci.pColorBlendState = &cb_state_ci;
gp_ci.layout = pipeline_layout.handle();
gp_ci.renderPass = renderPass();
gp_ci.subpass = 0;
const std::vector<float> test_cases = {-1.0f, 0.0f, NearestSmaller(1.0f), NearestGreater(1.0f), NAN};
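// With the zeroed feature struct above, wideLines is disabled, so any lineWidth other than
// exactly 1.0 (including NaN) must be rejected both at pipeline creation time and by
// vkCmdSetLineWidth.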
// test VkPipelineRasterizationStateCreateInfo::lineWidth
for (const auto test_case : test_cases) {
rs_state_ci.lineWidth = test_case;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), VK_NULL_HANDLE, 1, &gp_ci, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
// test vkCmdSetLineWidth
m_commandBuffer->begin();
for (const auto test_case : test_cases) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetLineWidth-lineWidth-00788");
vkCmdSetLineWidth(m_commandBuffer->handle(), test_case);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_binding_00618) {
TEST_DESCRIPTION(
"Test VUID-VkVertexInputBindingDescription-binding-00618: binding must be less than "
"VkPhysicalDeviceLimits::maxVertexInputBindings");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineCache pipeline_cache;
{
VkPipelineCacheCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
ASSERT_VK_SUCCESS(err);
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo stages[2]{{}};
stages[0] = vs.GetStageCreateInfo();
stages[1] = fs.GetStageCreateInfo();
// Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings.
VkVertexInputBindingDescription vertex_input_binding_description{};
vertex_input_binding_description.binding = m_device->props.limits.maxVertexInputBindings;
VkPipelineVertexInputStateCreateInfo vertex_input_state{};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = nullptr;
vertex_input_state.vertexBindingDescriptionCount = 1;
vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description;
vertex_input_state.vertexAttributeDescriptionCount = 0;
vertex_input_state.pVertexAttributeDescriptions = nullptr;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state{};
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkPipelineViewportStateCreateInfo viewport_state{};
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
VkPipelineMultisampleStateCreateInfo multisample_state{};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = nullptr;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = 0;
multisample_state.minSampleShading = 1.0;
multisample_state.pSampleMask = nullptr;
VkPipelineRasterizationStateCreateInfo rasterization_state{};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT;
rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.lineWidth = 1.0f;
const VkPipelineLayoutObj pipeline_layout(m_device);
{
VkGraphicsPipelineCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.stageCount = 2;
create_info.pStages = stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pViewportState = &viewport_state;
create_info.pMultisampleState = &multisample_state;
create_info.pRasterizationState = &rasterization_state;
create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
create_info.layout = pipeline_layout.handle();
create_info.renderPass = renderPass();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-binding-00618");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr);
}
TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_stride_00619) {
TEST_DESCRIPTION(
"Test VUID-VkVertexInputBindingDescription-stride-00619: stride must be less than or equal to "
"VkPhysicalDeviceLimits::maxVertexInputBindingStride");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineCache pipeline_cache;
{
VkPipelineCacheCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
ASSERT_VK_SUCCESS(err);
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo stages[2]{{}};
stages[0] = vs.GetStageCreateInfo();
stages[1] = fs.GetStageCreateInfo();
// Test when stride is greater than VkPhysicalDeviceLimits::maxVertexInputBindingStride.
VkVertexInputBindingDescription vertex_input_binding_description{};
vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride + 1;
VkPipelineVertexInputStateCreateInfo vertex_input_state{};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = nullptr;
vertex_input_state.vertexBindingDescriptionCount = 1;
vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description;
vertex_input_state.vertexAttributeDescriptionCount = 0;
vertex_input_state.pVertexAttributeDescriptions = nullptr;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state{};
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkPipelineViewportStateCreateInfo viewport_state{};
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
VkPipelineMultisampleStateCreateInfo multisample_state{};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = nullptr;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = 0;
multisample_state.minSampleShading = 1.0;
multisample_state.pSampleMask = nullptr;
VkPipelineRasterizationStateCreateInfo rasterization_state{};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT;
rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.lineWidth = 1.0f;
const VkPipelineLayoutObj pipeline_layout(m_device);
{
VkGraphicsPipelineCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.stageCount = 2;
create_info.pStages = stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pViewportState = &viewport_state;
create_info.pMultisampleState = &multisample_state;
create_info.pRasterizationState = &rasterization_state;
create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
create_info.layout = pipeline_layout.handle();
create_info.renderPass = renderPass();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputBindingDescription-stride-00619");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr);
}
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_location_00620) {
TEST_DESCRIPTION(
"Test VUID-VkVertexInputAttributeDescription-location-00620: location must be less than "
"VkPhysicalDeviceLimits::maxVertexInputAttributes");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineCache pipeline_cache;
{
VkPipelineCacheCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
ASSERT_VK_SUCCESS(err);
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo stages[2]{{}};
stages[0] = vs.GetStageCreateInfo();
stages[1] = fs.GetStageCreateInfo();
// Test when location is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes.
VkVertexInputAttributeDescription vertex_input_attribute_description{};
vertex_input_attribute_description.location = m_device->props.limits.maxVertexInputAttributes;
VkPipelineVertexInputStateCreateInfo vertex_input_state{};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = nullptr;
vertex_input_state.vertexBindingDescriptionCount = 0;
vertex_input_state.pVertexBindingDescriptions = nullptr;
vertex_input_state.vertexAttributeDescriptionCount = 1;
vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state{};
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkPipelineViewportStateCreateInfo viewport_state{};
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
VkPipelineMultisampleStateCreateInfo multisample_state{};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = nullptr;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = 0;
multisample_state.minSampleShading = 1.0;
multisample_state.pSampleMask = nullptr;
VkPipelineRasterizationStateCreateInfo rasterization_state{};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT;
rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.lineWidth = 1.0f;
const VkPipelineLayoutObj pipeline_layout(m_device);
{
VkGraphicsPipelineCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.stageCount = 2;
create_info.pStages = stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pViewportState = &viewport_state;
create_info.pMultisampleState = &multisample_state;
create_info.pRasterizationState = &rasterization_state;
create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
create_info.layout = pipeline_layout.handle();
create_info.renderPass = renderPass();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkVertexInputAttributeDescription-location-00620");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr);
}
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_binding_00621) {
TEST_DESCRIPTION(
"Test VUID-VkVertexInputAttributeDescription-binding-00621: binding must be less than "
"VkPhysicalDeviceLimits::maxVertexInputBindings");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineCache pipeline_cache;
{
VkPipelineCacheCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
ASSERT_VK_SUCCESS(err);
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo stages[2]{{}};
stages[0] = vs.GetStageCreateInfo();
stages[1] = fs.GetStageCreateInfo();
// Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings.
VkVertexInputAttributeDescription vertex_input_attribute_description{};
vertex_input_attribute_description.binding = m_device->props.limits.maxVertexInputBindings;
VkPipelineVertexInputStateCreateInfo vertex_input_state{};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = nullptr;
vertex_input_state.vertexBindingDescriptionCount = 0;
vertex_input_state.pVertexBindingDescriptions = nullptr;
vertex_input_state.vertexAttributeDescriptionCount = 1;
vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state{};
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {64, 64}};
VkPipelineViewportStateCreateInfo viewport_state{};
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
VkPipelineMultisampleStateCreateInfo multisample_state{};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = nullptr;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = 0;
multisample_state.minSampleShading = 1.0;
multisample_state.pSampleMask = nullptr;
VkPipelineRasterizationStateCreateInfo rasterization_state{};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT;
rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.lineWidth = 1.0f;
const VkPipelineLayoutObj pipeline_layout(m_device);
{
VkGraphicsPipelineCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.stageCount = 2;
create_info.pStages = stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pViewportState = &viewport_state;
create_info.pMultisampleState = &multisample_state;
create_info.pRasterizationState = &rasterization_state;
create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
create_info.layout = pipeline_layout.handle();
create_info.renderPass = renderPass();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-binding-00621");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr);
}
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_offset_00622) {
TEST_DESCRIPTION(
"Test VUID-VkVertexInputAttributeDescription-offset-00622: offset must be less than or equal to "
"VkPhysicalDeviceLimits::maxVertexInputAttributeOffset");
EnableDeviceProfileLayer();
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
uint32_t maxVertexInputAttributeOffset = 0;
{
VkPhysicalDeviceProperties device_props = {};
vkGetPhysicalDeviceProperties(gpu(), &device_props);
maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset;
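// If the device already reports the largest possible uint32_t limit, no offset can exceed it and the VUID cannot be
// triggered directly; fall back to the device_profile_api layer to lower the limit.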
if (maxVertexInputAttributeOffset == 0xFFFFFFFF) {
// Attempt to artificially lower maximum offset
PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT =
(PFN_vkSetPhysicalDeviceLimitsEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT");
if (!fpvkSetPhysicalDeviceLimitsEXT) {
printf("%s All offsets are valid & device_profile_api not found; skipped.\n", kSkipPrefix);
return;
}
device_props.limits.maxVertexInputAttributeOffset = device_props.limits.maxVertexInputBindingStride - 2;
fpvkSetPhysicalDeviceLimitsEXT(gpu(), &device_props.limits);
maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset;
}
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineCache pipeline_cache;
{
VkPipelineCacheCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkResult err = vkCreatePipelineCache(m_device->device(), &create_info, nullptr, &pipeline_cache);
ASSERT_VK_SUCCESS(err);
}
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineShaderStageCreateInfo stages[2]{{}};
stages[0] = vs.GetStageCreateInfo();
stages[1] = fs.GetStageCreateInfo();
VkVertexInputBindingDescription vertex_input_binding_description{};
vertex_input_binding_description.binding = 0;
vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride;
vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
// Test when offset is greater than maximum.
VkVertexInputAttributeDescription vertex_input_attribute_description{};
vertex_input_attribute_description.format = VK_FORMAT_R8_UNORM;
vertex_input_attribute_description.offset = maxVertexInputAttributeOffset + 1;
VkPipelineVertexInputStateCreateInfo vertex_input_state{};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = nullptr;
vertex_input_state.vertexBindingDescriptionCount = 1;
vertex_input_state.pVertexBindingDescriptions = &vertex_input_binding_description;
vertex_input_state.vertexAttributeDescriptionCount = 1;
vertex_input_state.pVertexAttributeDescriptions = &vertex_input_attribute_description;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state{};
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkPipelineMultisampleStateCreateInfo multisample_state{};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = nullptr;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = 0;
multisample_state.minSampleShading = 1.0;
multisample_state.pSampleMask = nullptr;
VkPipelineRasterizationStateCreateInfo rasterization_state{};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT;
rasterization_state.frontFace = VK_FRONT_FACE_COUNTER_CLOCKWISE;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_TRUE;
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.lineWidth = 1.0f;
const VkPipelineLayoutObj pipeline_layout(m_device);
{
VkGraphicsPipelineCreateInfo create_info{};
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.stageCount = 2;
create_info.pStages = stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pViewportState = nullptr; // no viewport b/c rasterizer is disabled
create_info.pMultisampleState = &multisample_state;
create_info.pRasterizationState = &rasterization_state;
create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
create_info.layout = pipeline_layout.handle();
create_info.renderPass = renderPass();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkVertexInputAttributeDescription-offset-00622");
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->device(), pipeline_cache, 1, &create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
}
vkDestroyPipelineCache(m_device->device(), pipeline_cache, nullptr);
}
TEST_F(VkLayerTest, NullRenderPass) {
// Pass a NULL pRenderPassBegin to vkCmdBeginRenderPass
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdBeginRenderPass: required parameter pRenderPassBegin specified as NULL");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
// Don't care about RenderPass handle b/c error should be flagged before
// that
vkCmdBeginRenderPass(m_commandBuffer->handle(), NULL, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, RenderPassWithinRenderPass) {
// Call vkCmdBeginRenderPass while another render pass is already active
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"It is invalid to issue this call inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Just create a dummy Renderpass that's non-NULL so we can get to the
// proper error
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, RenderPassClearOpMismatch) {
TEST_DESCRIPTION(
"Begin a renderPass where clearValueCount is less than the number of renderPass attachments that use "
"loadOpVK_ATTACHMENT_LOAD_OP_CLEAR.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create a renderPass with a single attachment that uses loadOp CLEAR
VkAttachmentReference attach = {};
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &attach;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_B8G8R8A8_UNORM;
// Set loadOp to CLEAR
attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
VkCommandBufferInheritanceInfo hinfo = {};
hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
hinfo.renderPass = VK_NULL_HANDLE;
hinfo.subpass = 0;
hinfo.framebuffer = VK_NULL_HANDLE;
hinfo.occlusionQueryEnable = VK_FALSE;
hinfo.queryFlags = 0;
hinfo.pipelineStatistics = 0;
VkCommandBufferBeginInfo info = {};
info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.pInheritanceInfo = &hinfo;
vkBeginCommandBuffer(m_commandBuffer->handle(), &info);
VkRenderPassBeginInfo rp_begin = {};
rp_begin.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rp_begin.pNext = NULL;
rp_begin.renderPass = renderPass();
rp_begin.framebuffer = framebuffer();
rp_begin.clearValueCount = 0; // Should be 1
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRenderPassBeginInfo-clearValueCount-00902");
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rp_begin, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkLayerTest, EndCommandBufferWithinRenderPass) {
TEST_DESCRIPTION("End a command buffer with an active render pass");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"It is invalid to issue this call inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkEndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
// End command buffer properly to avoid driver issues. This is safe -- the
// previous vkEndCommandBuffer should not have reached the driver.
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// TODO: Add test for VK_COMMAND_BUFFER_LEVEL_SECONDARY
// TODO: Add test for VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT
}
TEST_F(VkLayerTest, FillBufferWithinRenderPass) {
// Call CmdFillBuffer within an active renderpass
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"It is invalid to issue this call inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
VkBufferObj dstBuffer;
dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs);
m_commandBuffer->FillBuffer(dstBuffer.handle(), 0, 4, 0x11111111);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, UpdateBufferWithinRenderPass) {
// Call CmdUpdateBuffer within an active renderpass
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"It is invalid to issue this call inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
VkBufferObj dstBuffer;
dstBuffer.init_as_dst(*m_device, (VkDeviceSize)1024, reqs);
VkDeviceSize dstOffset = 0;
uint32_t Data[] = {1, 2, 3, 4, 5, 6, 7, 8};
VkDeviceSize dataSize = sizeof(Data) / sizeof(uint32_t);
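// Note: dataSize is in bytes, so this updates only the first 8 bytes of Data; that is enough here because the test
// only exercises the render-pass restriction on vkCmdUpdateBuffer.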
vkCmdUpdateBuffer(m_commandBuffer->handle(), dstBuffer.handle(), dstOffset, dataSize, &Data);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ClearColorImageWithBadRange) {
TEST_DESCRIPTION("Record clear color with an invalid VkImageSubresourceRange");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}};
m_commandBuffer->begin();
const auto cb_handle = m_commandBuffer->handle();
// Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseMipLevel-01470");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try levelCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel + levelCount > image.mipLevels
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01692");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-baseArrayLayer-01472");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try layerCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer + layerCount > image.arrayLayers
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-pRanges-01693");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ClearDepthStencilWithBadRange) {
TEST_DESCRIPTION("Record clear depth with an invalid VkImageSubresourceRange");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkImageObj image(m_device);
image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkClearDepthStencilValue clear_value = {};
m_commandBuffer->begin();
const auto cb_handle = m_commandBuffer->handle();
// Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474");
const VkImageSubresourceRange range = {ds_aspect, 1, VK_REMAINING_MIP_LEVELS, 0, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694");
const VkImageSubresourceRange range = {ds_aspect, 1, 1, 0, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try levelCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694");
const VkImageSubresourceRange range = {ds_aspect, 0, 0, 0, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel + levelCount > image.mipLevels
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01694");
const VkImageSubresourceRange range = {ds_aspect, 0, 2, 0, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476");
const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695");
const VkImageSubresourceRange range = {ds_aspect, 0, 1, 1, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try layerCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695");
const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 0};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer + layerCount > image.arrayLayers
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-pRanges-01695");
const VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 2};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ClearColorImageWithinRenderPass) {
// Call CmdClearColorImage within an active RenderPass
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"It is invalid to issue this call inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkClearColorValue clear_color;
memset(clear_color.uint32, 0, sizeof(uint32_t) * 4);
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image dstImage;
dstImage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs);
const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT);
vkCmdClearColorImage(m_commandBuffer->handle(), dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &range);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ClearDepthStencilImageErrors) {
// Hit errors related to vkCmdClearDepthStencilImage()
// 1. Use an image that doesn't have VK_IMAGE_USAGE_TRANSFER_DST_BIT set
// 2. Call CmdClearDepthStencilImage within an active RenderPass
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkClearDepthStencilValue clear_value = {0};
VkMemoryPropertyFlags reqs = 0;
VkImageCreateInfo image_create_info = vk_testing::Image::create_info();
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = depth_format;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Error here is that VK_IMAGE_USAGE_TRANSFER_DST_BIT is excluded for DS image that we'll call Clear on below
image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
vk_testing::Image dst_image_bad_usage;
dst_image_bad_usage.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs);
const VkImageSubresourceRange range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-image-00009");
vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image_bad_usage.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1,
&range);
m_errorMonitor->VerifyFound();
// Fix usage for next test case
image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image dst_image;
dst_image.init(*m_device, (const VkImageCreateInfo &)image_create_info, reqs);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-renderpass");
vkCmdClearDepthStencilImage(m_commandBuffer->handle(), dst_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_value, 1, &range);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ClearColorAttachmentsOutsideRenderPass) {
// Call CmdClearAttachments outside of an active RenderPass
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdClearAttachments(): This call must be issued inside an active render pass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Intentionally do not begin a render pass
m_commandBuffer->begin();
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 0;
color_attachment.clearValue.color.float32[1] = 0;
color_attachment.clearValue.color.float32[2] = 0;
color_attachment.clearValue.color.float32[3] = 0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {32, 32}}};
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassExcessiveNextSubpass) {
TEST_DESCRIPTION("Test that an error is produced when CmdNextSubpass is called too many times in a renderpass instance");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdNextSubpass(): Attempted to advance beyond final subpass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// error here.
vkCmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, RenderPassEndedBeforeFinalSubpass) {
TEST_DESCRIPTION("Test that an error is produced when CmdEndRenderPass is called before the final subpass has been reached");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdEndRenderPass(): Called before reaching final subpass");
ASSERT_NO_FATAL_FAILURE(Init());
VkSubpassDescription sd[2] = {{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr},
{0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, nullptr, 0, nullptr}};
VkRenderPassCreateInfo rcpi = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 2, sd, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rcpi, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 16, 16, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin(); // no implicit RP begin
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {16, 16}}, 0, nullptr};
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
// Error here.
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
// Clean up.
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, BufferMemoryBarrierNoBuffer) {
// Try to add a buffer memory barrier with no buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"required parameter pBufferMemoryBarriers[0].buffer specified as VK_NULL_HANDLE");
ASSERT_NO_FATAL_FAILURE(Init());
m_commandBuffer->begin();
VkBufferMemoryBarrier buf_barrier = {};
buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
buf_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buf_barrier.buffer = VK_NULL_HANDLE;
buf_barrier.offset = 0;
buf_barrier.size = VK_WHOLE_SIZE;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr,
1, &buf_barrier, 0, nullptr);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidBarriers) {
TEST_DESCRIPTION("A variety of ways to get VK_INVALID_BARRIER ");
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
// Add a token self-dependency for this test to avoid unexpected errors
m_addRenderPassSelfDependency = true;
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
// Use image unbound to memory in barrier
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindImageMemory()");
vk_testing::Image unbound_image;
auto unbound_image_info = vk_testing::Image::create_info();
unbound_image_info.format = VK_FORMAT_B8G8R8A8_UNORM;
unbound_image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
unbound_image.init_no_mem(*m_device, unbound_image_info);
auto unbound_subresource = vk_testing::Image::subresource_range(unbound_image_info, VK_IMAGE_ASPECT_COLOR_BIT);
auto unbound_image_barrier = unbound_image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, unbound_subresource);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0,
nullptr, 0, nullptr, 1, &unbound_image_barrier);
m_errorMonitor->VerifyFound();
// Use buffer unbound to memory in barrier
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindBufferMemory()");
VkBufferObj unbound_buffer;
auto unbound_buffer_info = VkBufferObj::create_info(16, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
unbound_buffer.init_no_mem(*m_device, unbound_buffer_info);
auto unbound_buffer_barrier = unbound_buffer.buffer_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, 0, 16);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0,
nullptr, 1, &unbound_buffer_barrier, 0, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-newLayout-01198");
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = NULL;
img_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// New layout can't be UNDEFINED
img_barrier.newLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img_barrier.image = m_renderTargets[0]->handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Transition image to color attachment optimal
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
// Can't send buffer memory barrier during a render pass
vkCmdEndRenderPass(m_commandBuffer->handle());
VkBufferObj buffer;
VkMemoryPropertyFlags mem_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
buffer.init_as_src_and_dst(*m_device, 256, mem_reqs);
VkBufferMemoryBarrier buf_barrier = {};
buf_barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
buf_barrier.pNext = NULL;
buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
buf_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
buf_barrier.buffer = buffer.handle();
buf_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buf_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
buf_barrier.offset = 0;
buf_barrier.size = VK_WHOLE_SIZE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-offset-01187");
// Exceed the buffer size
buf_barrier.offset = buffer.create_info().size + 1;
// Offset greater than total size
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0,
nullptr);
m_errorMonitor->VerifyFound();
buf_barrier.offset = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferMemoryBarrier-size-01189");
buf_barrier.size = buffer.create_info().size + 1;
// Size greater than total size
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0,
nullptr);
m_errorMonitor->VerifyFound();
// Now exercise barrier aspect bit errors, first DS
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207");
VkDepthStencilObj ds_image(m_device);
ds_image.Init(m_device, 128, 128, depth_format);
ASSERT_TRUE(ds_image.initialized());
img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = ds_image.handle();
// Not having DEPTH or STENCIL set is an error
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Having only one of depth or stencil set for DS image is an error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-image-01207");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Having anything other than DEPTH and STENCIL is an error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// Now test depth-only
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &format_props);
if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
VkDepthStencilObj d_image(m_device);
d_image.Init(m_device, 128, 128, VK_FORMAT_D16_UNORM);
ASSERT_TRUE(d_image.initialized());
img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = d_image.handle();
// DEPTH bit must be set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set.");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// No bits other than DEPTH may be set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set.");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
}
// Now test stencil-only
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &format_props);
if (format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) {
VkDepthStencilObj s_image(m_device);
s_image.Init(m_device, 128, 128, VK_FORMAT_S8_UINT);
ASSERT_TRUE(s_image.initialized());
img_barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = s_image.handle();
// Use of COLOR aspect on a stencil-only image is an error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set.");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
}
// Finally test color
VkImageObj c_image(m_device);
c_image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(c_image.initialized());
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = c_image.handle();
// COLOR bit must be set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// No bits other than COLOR may be set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set.");
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
// A barrier's new and old VkImageLayout must be compatible with an image's VkImageUsageFlags.
{
VkImageObj img_color(m_device);
img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_color.initialized());
VkImageObj img_ds(m_device);
img_ds.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_ds.initialized());
VkImageObj img_xfer_src(m_device);
img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_xfer_src.initialized());
VkImageObj img_xfer_dst(m_device);
img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_xfer_dst.initialized());
VkImageObj img_sampled(m_device);
img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_sampled.initialized());
VkImageObj img_input(m_device);
img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_input.initialized());
const struct {
VkImageObj &image_obj;
VkImageLayout bad_layout;
std::string msg_code;
} bad_buffer_layouts[] = {
// clang-format off
// images _without_ VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
{img_ds, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"},
{img_xfer_src, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"},
{img_xfer_dst, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"},
{img_sampled, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"},
{img_input, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01208"},
// images _without_ VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
{img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"},
{img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"},
{img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"},
{img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"},
{img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01209"},
{img_color, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"},
{img_xfer_src, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"},
{img_xfer_dst, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"},
{img_sampled, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"},
{img_input, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01210"},
// images _without_ VK_IMAGE_USAGE_SAMPLED_BIT or VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT
{img_color, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"},
{img_ds, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"},
{img_xfer_src, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"},
{img_xfer_dst, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01211"},
// images _without_ VK_IMAGE_USAGE_TRANSFER_SRC_BIT
{img_color, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"},
{img_ds, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"},
{img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"},
{img_sampled, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"},
{img_input, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01212"},
// images _without_ VK_IMAGE_USAGE_TRANSFER_DST_BIT
{img_color, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"},
{img_ds, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"},
{img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"},
{img_sampled, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"},
{img_input, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, "VUID-VkImageMemoryBarrier-oldLayout-01213"},
// clang-format on
};
const uint32_t layout_count = sizeof(bad_buffer_layouts) / sizeof(bad_buffer_layouts[0]);
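// Each table entry is exercised twice: once with the bad layout as oldLayout and once as newLayout, with the
// other field set to VK_IMAGE_LAYOUT_GENERAL.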
for (uint32_t i = 0; i < layout_count; ++i) {
img_barrier.image = bad_buffer_layouts[i].image_obj.handle();
const VkImageUsageFlags usage = bad_buffer_layouts[i].image_obj.usage();
img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)
: VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.oldLayout = bad_buffer_layouts[i].bad_layout;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = bad_buffer_layouts[i].bad_layout;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_buffer_layouts[i].msg_code);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&img_barrier);
m_errorMonitor->VerifyFound();
}
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
}
// Attempt barrier where srcAccessMask is not supported by srcStageMask
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184");
// Include a lower-order bit that is supported (shader write) together with a higher-order bit that is not, to verify multi-bit validation
buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_WRITE_BIT;
buf_barrier.offset = 0;
buf_barrier.size = VK_WHOLE_SIZE;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
m_errorMonitor->VerifyFound();
// Attempt barrier where dstAccessMask is not supported by dstStageMask
buf_barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0,
nullptr);
m_errorMonitor->VerifyFound();
// Attempt to mismatch barriers/waitEvents calls with incompatible queues
// Create a command pool with incompatible queue flags
const std::vector<VkQueueFamilyProperties> queue_props = m_device->queue_props;
uint32_t queue_family_index = m_device->QueueFamilyMatching(VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT);
if (queue_family_index == UINT32_MAX) {
printf("%s No non-compute queue supporting graphics found; skipped.\n", kSkipPrefix);
return; // NOTE: this exits the test function!
}
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPipelineBarrier-srcStageMask-01183");
VkCommandPoolObj command_pool(m_device, queue_family_index, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj bad_command_buffer(m_device, &command_pool);
bad_command_buffer.begin();
buf_barrier.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
// Set two bits that should both be supported as a bonus positive check
buf_barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT;
vkCmdPipelineBarrier(bad_command_buffer.handle(), VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buf_barrier, 0, nullptr);
m_errorMonitor->VerifyFound();
// Check for the error from waiting on a pipeline stage not supported by this queue. Specifically, since our queue is not a
// compute queue, vkCmdWaitEvents cannot have its source stage mask be VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdWaitEvents-srcStageMask-01164");
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
vkCmdWaitEvents(bad_command_buffer.handle(), 1, &event, /*source stage mask*/ VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, nullptr, 0, nullptr, 0, nullptr);
m_errorMonitor->VerifyFound();
bad_command_buffer.end();
vkDestroyEvent(m_device->device(), event, nullptr);
}
// Helpers for the tests below
static void ValidOwnershipTransferOp(ErrorMonitor *monitor, VkCommandBufferObj *cb, VkPipelineStageFlags src_stages,
VkPipelineStageFlags dst_stages, const VkBufferMemoryBarrier *buf_barrier,
const VkImageMemoryBarrier *img_barrier) {
monitor->ExpectSuccess();
cb->begin();
uint32_t num_buf_barrier = (buf_barrier) ? 1 : 0;
uint32_t num_img_barrier = (img_barrier) ? 1 : 0;
cb->PipelineBarrier(src_stages, dst_stages, 0, 0, nullptr, num_buf_barrier, buf_barrier, num_img_barrier, img_barrier);
cb->end();
cb->QueueCommandBuffer(); // Implicitly waits
monitor->VerifyNotFound();
}
static void ValidOwnershipTransfer(ErrorMonitor *monitor, VkCommandBufferObj *cb_from, VkCommandBufferObj *cb_to,
VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages,
const VkBufferMemoryBarrier *buf_barrier, const VkImageMemoryBarrier *img_barrier) {
ValidOwnershipTransferOp(monitor, cb_from, src_stages, dst_stages, buf_barrier, img_barrier);
ValidOwnershipTransferOp(monitor, cb_to, src_stages, dst_stages, buf_barrier, img_barrier);
}
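// For EXCLUSIVE sharing mode, a queue family ownership transfer is the same barrier recorded twice: a release
// on a command buffer belonging to the source family and an acquire on one belonging to the destination family.
// ValidOwnershipTransfer performs both halves, submitting (and implicitly waiting on) each command buffer in turn.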
TEST_F(VkPositiveLayerTest, OwnershipTransfersImage) {
TEST_DESCRIPTION("Valid image ownership transfers that shouldn't create errors");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
if (no_gfx == UINT32_MAX) {
printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
return;
}
VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();
VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);
// Create an "exclusive" image owned by the graphics queue.
VkImageObj image(m_device);
VkFlags image_use = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, image_use, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
auto image_subres = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1);
auto image_barrier = image.image_memory_barrier(0, 0, image.Layout(), image.Layout(), image_subres);
image_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
image_barrier.dstQueueFamilyIndex = no_gfx;
ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, nullptr, &image_barrier);
// Change layouts while changing ownership
image_barrier.srcQueueFamilyIndex = no_gfx;
image_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
image_barrier.oldLayout = image.Layout();
// Make sure the new layout is different from the old
if (image_barrier.oldLayout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) {
image_barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
} else {
image_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
}
ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, nullptr, &image_barrier);
}
TEST_F(VkPositiveLayerTest, OwnershipTransfersBuffer) {
TEST_DESCRIPTION("Valid buffer ownership transfers that shouldn't create errors");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
uint32_t no_gfx = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
if (no_gfx == UINT32_MAX) {
printf("%s Required queue families not present (non-graphics capable required).\n", kSkipPrefix);
return;
}
VkQueueObj *no_gfx_queue = m_device->queue_family_queues(no_gfx)[0].get();
VkCommandPoolObj no_gfx_pool(m_device, no_gfx, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj no_gfx_cb(m_device, &no_gfx_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, no_gfx_queue);
// Create a buffer
const VkDeviceSize buffer_size = 256;
uint8_t data[buffer_size] = {0xFF};
VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
ASSERT_TRUE(buffer.initialized());
auto buffer_barrier = buffer.buffer_memory_barrier(0, 0, 0, VK_WHOLE_SIZE);
// Let gfx own it.
buffer_barrier.srcQueueFamilyIndex = m_device->graphics_queue_node_index_;
buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
ValidOwnershipTransferOp(m_errorMonitor, m_commandBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
&buffer_barrier, nullptr);
// Transfer it to non-gfx
buffer_barrier.dstQueueFamilyIndex = no_gfx;
ValidOwnershipTransfer(m_errorMonitor, m_commandBuffer, &no_gfx_cb, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT, &buffer_barrier, nullptr);
// Transfer it to gfx
buffer_barrier.srcQueueFamilyIndex = no_gfx;
buffer_barrier.dstQueueFamilyIndex = m_device->graphics_queue_node_index_;
ValidOwnershipTransfer(m_errorMonitor, &no_gfx_cb, m_commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, &buffer_barrier, nullptr);
}
class BarrierQueueFamilyTestHelper {
public:
struct QueueFamilyObjs {
uint32_t index;
// We would use std::unique_ptr, but this triggers a compiler error on older compilers
VkQueueObj *queue = nullptr;
VkCommandPoolObj *command_pool = nullptr;
VkCommandBufferObj *command_buffer = nullptr;
VkCommandBufferObj *command_buffer2 = nullptr;
~QueueFamilyObjs() {
delete command_buffer2;
delete command_buffer;
delete command_pool;
delete queue;
}
void Init(VkDeviceObj *device, uint32_t qf_index, VkQueue qf_queue, VkCommandPoolCreateFlags cp_flags) {
index = qf_index;
queue = new VkQueueObj(qf_queue, qf_index);
command_pool = new VkCommandPoolObj(device, qf_index, cp_flags);
command_buffer = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue);
command_buffer2 = new VkCommandBufferObj(device, command_pool, VK_COMMAND_BUFFER_LEVEL_PRIMARY, queue);
};
};
struct Context {
VkLayerTest *layer_test;
uint32_t default_index;
std::unordered_map<uint32_t, QueueFamilyObjs> queue_families;
Context(VkLayerTest *test, const std::vector<uint32_t> &queue_family_indices) : layer_test(test) {
if (0 == queue_family_indices.size()) {
return; // Invalid: a Context requires at least one queue family index
}
VkDeviceObj *device_obj = layer_test->DeviceObj();
queue_families.reserve(queue_family_indices.size());
default_index = queue_family_indices[0];
for (auto qfi : queue_family_indices) {
VkQueue queue = device_obj->queue_family_queues(qfi)[0]->handle();
queue_families.emplace(std::make_pair(qfi, QueueFamilyObjs()));
queue_families[qfi].Init(device_obj, qfi, queue, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
}
Reset();
}
void Reset() {
layer_test->DeviceObj()->wait();
for (auto &qf : queue_families) {
vkResetCommandPool(layer_test->device(), qf.second.command_pool->handle(), 0);
}
}
};
BarrierQueueFamilyTestHelper(Context *context) : context_(context), image_(context->layer_test->DeviceObj()) {}
// Init with queue families non-null for CONCURRENT sharing mode (which requires them)
void Init(std::vector<uint32_t> *families) {
VkDeviceObj *device_obj = context_->layer_test->DeviceObj();
image_.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0, families);
ASSERT_TRUE(image_.initialized());
image_barrier_ =
image_.image_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, image_.Layout(), image_.Layout(),
image_.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1));
VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
buffer_.init_as_src_and_dst(*device_obj, 256, mem_prop, families);
ASSERT_TRUE(buffer_.initialized());
buffer_barrier_ = buffer_.buffer_memory_barrier(VK_ACCESS_TRANSFER_READ_BIT, VK_ACCESS_TRANSFER_READ_BIT, 0, VK_WHOLE_SIZE);
}
QueueFamilyObjs *GetQueueFamilyInfo(Context *context, uint32_t qfi) {
QueueFamilyObjs *qf;
auto qf_it = context->queue_families.find(qfi);
if (qf_it != context->queue_families.end()) {
qf = &(qf_it->second);
} else {
qf = &(context->queue_families[context->default_index]);
}
return qf;
}
enum Modifier {
NONE,
DOUBLE_RECORD,
DOUBLE_COMMAND_BUFFER,
};
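// NONE records the barrier once in a single command buffer. DOUBLE_RECORD records it twice in the same command
// buffer (duplicate release or acquire in one CB). DOUBLE_COMMAND_BUFFER records it into two command buffers that
// are submitted together in one batch.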
void operator()(std::string img_err, std::string buf_err, uint32_t src, uint32_t dst, bool positive = false,
uint32_t queue_family_index = kInvalidQueueFamily, Modifier mod = Modifier::NONE) {
auto monitor = context_->layer_test->Monitor();
monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, img_err);
monitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT, buf_err);
image_barrier_.srcQueueFamilyIndex = src;
image_barrier_.dstQueueFamilyIndex = dst;
buffer_barrier_.srcQueueFamilyIndex = src;
buffer_barrier_.dstQueueFamilyIndex = dst;
QueueFamilyObjs *qf = GetQueueFamilyInfo(context_, queue_family_index);
VkCommandBufferObj *command_buffer = qf->command_buffer;
for (int cb_repeat = 0; cb_repeat < (mod == Modifier::DOUBLE_COMMAND_BUFFER ? 2 : 1); cb_repeat++) {
command_buffer->begin();
for (int repeat = 0; repeat < (mod == Modifier::DOUBLE_RECORD ? 2 : 1); repeat++) {
vkCmdPipelineBarrier(command_buffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 1, &buffer_barrier_, 1, &image_barrier_);
}
command_buffer->end();
command_buffer = qf->command_buffer2; // Second pass (if any) records into the second command buffer.
}
if (queue_family_index != kInvalidQueueFamily) {
if (mod == Modifier::DOUBLE_COMMAND_BUFFER) {
// The default-constructed Fence resolves to VK_NULL_HANDLE, i.e. no fence
qf->queue->submit({{qf->command_buffer, qf->command_buffer2}}, vk_testing::Fence(), positive);
} else {
qf->command_buffer->QueueCommandBuffer(positive); // Check for success on positive tests only
}
}
if (positive) {
monitor->VerifyNotFound();
} else {
monitor->VerifyFound();
}
context_->Reset();
};
protected:
static const uint32_t kInvalidQueueFamily = UINT32_MAX;
Context *context_;
VkImageObj image_;
VkImageMemoryBarrier image_barrier_;
VkBufferObj buffer_;
VkBufferMemoryBarrier buffer_barrier_;
};
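// Typical use: build a Context over the queue family indices under test, Init() a helper with a family list for
// CONCURRENT sharing (or nullptr for EXCLUSIVE), then invoke the helper with the expected image/buffer VUIDs, the
// barrier's src/dst family indices, and optionally the family whose queue submits the command buffer.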
TEST_F(VkLayerTest, InvalidBarrierQueueFamily) {
TEST_DESCRIPTION("Create and submit barriers with invalid queue families");
ASSERT_NO_FATAL_FAILURE(Init(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
// Find queues of two families
const uint32_t submit_family = m_device->graphics_queue_node_index_;
const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
const uint32_t other_family = submit_family != 0 ? 0 : 1;
const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);
std::vector<uint32_t> qf_indices{{submit_family, other_family}};
if (only_one_family) {
qf_indices.resize(1);
}
BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
printf(
"%s Device has apiVersion greater than 1.0 -- skipping test cases that require external memory to be disabled.\n",
kSkipPrefix);
} else {
if (only_one_family) {
printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
} else {
std::vector<uint32_t> families = {submit_family, other_family};
BarrierQueueFamilyTestHelper conc_test(&test_context);
conc_test.Init(&families);
// core_validation::barrier_queue_families::kSrcAndDestMustBeIgnore
conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
submit_family);
conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
VK_QUEUE_FAMILY_IGNORED);
conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", submit_family,
submit_family);
// true -> positive test
conc_test("VUID-VkImageMemoryBarrier-image-01199", "VUID-VkBufferMemoryBarrier-buffer-01190", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, true);
}
BarrierQueueFamilyTestHelper excl_test(&test_context);
excl_test.Init(nullptr); // no queue families means *exclusive* sharing mode.
// core_validation::barrier_queue_families::kBothIgnoreOrBothValid
excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
submit_family);
excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family,
VK_QUEUE_FAMILY_IGNORED);
// true -> positive test
excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", submit_family, submit_family,
true);
excl_test("VUID-VkImageMemoryBarrier-image-01200", "VUID-VkBufferMemoryBarrier-buffer-01192", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, true);
}
if (only_one_family) {
printf("%s Single queue family found -- VK_SHARING_MODE_EXCLUSIVE submit testcases skipped.\n", kSkipPrefix);
} else {
BarrierQueueFamilyTestHelper excl_test(&test_context);
excl_test.Init(nullptr);
// core_validation::barrier_queue_families::kSubmitQueueMustMatchSrcOrDst
excl_test("VUID-VkImageMemoryBarrier-image-01205", "VUID-VkBufferMemoryBarrier-buffer-01196", other_family, other_family,
false, submit_family);
// true -> positive test (testing both the index logic and the QFO transfer tracking).
excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family);
excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, other_family);
excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, other_family);
excl_test("POSITIVE_TEST", "POSITIVE_TEST", other_family, submit_family, true, submit_family);
// negative testing for QFO transfer tracking
// Duplicate release in one CB
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
// Duplicate pending release
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00003", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00003", submit_family,
other_family, false, submit_family);
// Duplicate acquire in one CB
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00001", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00001", submit_family,
other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_RECORD);
// No pending release
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00004", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00004", submit_family,
other_family, false, other_family);
// Duplicate release in two CB
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
other_family, false, submit_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
// Duplicate acquire in two CB
excl_test("POSITIVE_TEST", "POSITIVE_TEST", submit_family, other_family, true, submit_family); // need a succesful release
excl_test("UNASSIGNED-VkImageMemoryBarrier-image-00002", "UNASSIGNED-VkBufferMemoryBarrier-buffer-00002", submit_family,
other_family, false, other_family, BarrierQueueFamilyTestHelper::DOUBLE_COMMAND_BUFFER);
}
}
TEST_F(VkLayerTest, InvalidBarrierQueueFamilyWithMemExt) {
TEST_DESCRIPTION("Create and submit barriers with invalid queue families when memory extension is enabled ");
std::vector<const char *> reqd_instance_extensions = {
{VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
for (auto extension_name : reqd_instance_extensions) {
if (InstanceExtensionSupported(extension_name)) {
m_instance_extension_names.push_back(extension_name);
} else {
printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
return;
}
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for external memory device extensions
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
} else {
printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
// Find queues of two families
const uint32_t submit_family = m_device->graphics_queue_node_index_;
const uint32_t invalid = static_cast<uint32_t>(m_device->queue_props.size());
const uint32_t other_family = submit_family != 0 ? 0 : 1;
const bool only_one_family = (invalid == 1) || (m_device->queue_props[other_family].queueCount == 0);
std::vector<uint32_t> qf_indices{{submit_family, other_family}};
if (only_one_family) {
qf_indices.resize(1);
}
BarrierQueueFamilyTestHelper::Context test_context(this, qf_indices);
if (only_one_family) {
printf("%s Single queue family found -- VK_SHARING_MODE_CONCURRENT testcases skipped.\n", kSkipPrefix);
} else {
std::vector<uint32_t> families = {submit_family, other_family};
BarrierQueueFamilyTestHelper conc_test(&test_context);
// core_validation::barrier_queue_families::kSrcOrDstMustBeIgnore
conc_test.Init(&families);
conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", submit_family, submit_family);
// true -> positive test
conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, true);
conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_EXTERNAL_KHR, true);
conc_test("VUID-VkImageMemoryBarrier-image-01381", "VUID-VkBufferMemoryBarrier-buffer-01191", VK_QUEUE_FAMILY_EXTERNAL_KHR,
VK_QUEUE_FAMILY_IGNORED, true);
// core_validation::barrier_queue_families::kSpecialOrIgnoreOnly
conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", submit_family,
VK_QUEUE_FAMILY_IGNORED);
conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED,
submit_family);
// This is to flag the errors that would be considered only "unexpected" in the parallel case above
// true -> positive test
conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_EXTERNAL_KHR, true);
conc_test("VUID-VkImageMemoryBarrier-image-01766", "VUID-VkBufferMemoryBarrier-buffer-01763", VK_QUEUE_FAMILY_EXTERNAL_KHR,
VK_QUEUE_FAMILY_IGNORED, true);
}
BarrierQueueFamilyTestHelper excl_test(&test_context);
excl_test.Init(nullptr); // no queue families means *exclusive* sharing mode.
// core_validation::barrier_queue_families::kSrcIgnoreRequiresDstIgnore
excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
submit_family);
excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_EXTERNAL_KHR);
// true -> positive test
excl_test("VUID-VkImageMemoryBarrier-image-01201", "VUID-VkBufferMemoryBarrier-buffer-01193", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, true);
// core_validation::barrier_queue_families::kDstValidOrSpecialIfNotIgnore
excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, invalid);
// true -> positive test
excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family, submit_family,
true);
excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
VK_QUEUE_FAMILY_IGNORED, true);
excl_test("VUID-VkImageMemoryBarrier-image-01768", "VUID-VkBufferMemoryBarrier-buffer-01765", submit_family,
VK_QUEUE_FAMILY_EXTERNAL_KHR, true);
// core_validation::barrier_queue_families::kSrcValidOrSpecialIfNotIgnore
excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", invalid, submit_family);
// true -> positive test
excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", submit_family, submit_family,
true);
excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED, true);
excl_test("VUID-VkImageMemoryBarrier-image-01767", "VUID-VkBufferMemoryBarrier-buffer-01764", VK_QUEUE_FAMILY_EXTERNAL_KHR,
submit_family, true);
}
TEST_F(VkLayerTest, ImageBarrierWithBadRange) {
TEST_DESCRIPTION("VkImageMemoryBarrier with an invalid subresourceRange");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
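// The image is created with one mip level and one array layer, so any subresource range that starts past
// level/layer 0 or extends beyond them is out of range.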
VkImageMemoryBarrier img_barrier_template = {};
img_barrier_template.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier_template.pNext = NULL;
img_barrier_template.srcAccessMask = 0;
img_barrier_template.dstAccessMask = 0;
img_barrier_template.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img_barrier_template.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier_template.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier_template.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier_template.image = image.handle();
// subresourceRange to be set later for the purposes of this test
img_barrier_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier_template.subresourceRange.baseArrayLayer = 0;
img_barrier_template.subresourceRange.baseMipLevel = 0;
img_barrier_template.subresourceRange.layerCount = 0;
img_barrier_template.subresourceRange.levelCount = 0;
m_commandBuffer->begin();
// Nested scope here confuses clang-format, somehow
// clang-format off
// try for vkCmdPipelineBarrier
{
// Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try levelCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel + levelCount > image.mipLevels
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try layerCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer + layerCount > image.arrayLayers
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
}
// try for vkCmdWaitEvents
{
VkEvent event;
VkEventCreateInfo eci{VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, NULL, 0};
VkResult err = vkCreateEvent(m_device->handle(), &eci, nullptr, &event);
ASSERT_VK_SUCCESS(err);
// Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01486");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try levelCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel + levelCount > image.mipLevels
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01724");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01488");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try layerCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer + layerCount > image.arrayLayers
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-subresourceRange-01725");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2};
VkImageMemoryBarrier img_barrier = img_barrier_template;
img_barrier.subresourceRange = range;
vkCmdWaitEvents(m_commandBuffer->handle(), 1, &event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0, nullptr, 1, &img_barrier);
m_errorMonitor->VerifyFound();
}
vkDestroyEvent(m_device->handle(), event, nullptr);
}
// clang-format on
}
TEST_F(VkLayerTest, ValidationCacheTestBadMerge) {
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), "VK_LAYER_LUNARG_core_validation", VK_EXT_VALIDATION_CACHE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
} else {
printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Load extension functions
auto fpCreateValidationCache =
(PFN_vkCreateValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkCreateValidationCacheEXT");
auto fpDestroyValidationCache =
(PFN_vkDestroyValidationCacheEXT)vkGetDeviceProcAddr(m_device->device(), "vkDestroyValidationCacheEXT");
auto fpMergeValidationCaches =
(PFN_vkMergeValidationCachesEXT)vkGetDeviceProcAddr(m_device->device(), "vkMergeValidationCachesEXT");
if (!fpCreateValidationCache || !fpDestroyValidationCache || !fpMergeValidationCaches) {
printf("%s Failed to load function pointers for %s\n", kSkipPrefix, VK_EXT_VALIDATION_CACHE_EXTENSION_NAME);
return;
}
VkValidationCacheCreateInfoEXT validationCacheCreateInfo;
validationCacheCreateInfo.sType = VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT;
validationCacheCreateInfo.pNext = NULL;
validationCacheCreateInfo.initialDataSize = 0;
validationCacheCreateInfo.pInitialData = NULL;
validationCacheCreateInfo.flags = 0;
VkValidationCacheEXT validationCache = VK_NULL_HANDLE;
VkResult res = fpCreateValidationCache(m_device->device(), &validationCacheCreateInfo, nullptr, &validationCache);
ASSERT_VK_SUCCESS(res);
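// Merging a validation cache into itself is invalid: dstCache must not appear in the pSrcCaches list
// (VUID-vkMergeValidationCachesEXT-dstCache-01536).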
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkMergeValidationCachesEXT-dstCache-01536");
res = fpMergeValidationCaches(m_device->device(), validationCache, 1, &validationCache);
m_errorMonitor->VerifyFound();
fpDestroyValidationCache(m_device->device(), validationCache, nullptr);
}
TEST_F(VkPositiveLayerTest, LayoutFromPresentWithoutAccessMemoryRead) {
// Transition an image away from PRESENT_SRC_KHR without ACCESS_MEMORY_READ
// in srcAccessMask.
// The required behavior here was a bit unclear in earlier versions of the
// spec, but there is no memory dependency required here, so this should
// work without warnings.
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageMemoryBarrier barrier = {};
VkImageSubresourceRange range;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = 0;
barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.image = image.handle();
range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
range.baseMipLevel = 0;
range.levelCount = 1;
range.baseArrayLayer = 0;
range.layerCount = 1;
barrier.subresourceRange = range;
VkCommandBufferObj cmdbuf(m_device, m_commandPool);
cmdbuf.begin();
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
barrier.oldLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, IdxBufferAlignmentError) {
// Attempt to bind an index buffer at an offset that is not a multiple of the index type size
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
uint32_t const indices[] = {0};
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.size = 1024;
buf_info.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
buf_info.queueFamilyIndexCount = 1;
buf_info.pQueueFamilyIndices = indices;
VkBuffer buffer;
VkResult err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements requirements;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &requirements);
VkMemoryAllocateInfo alloc_info{};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
alloc_info.allocationSize = requirements.size;
bool pass = m_device->phy().set_memory_type(requirements.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
ASSERT_TRUE(pass);
VkDeviceMemory memory;
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, memory, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
ASSERT_VK_SUCCESS(err);
// vkCmdBindPipeline(m_commandBuffer->handle(),
// VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// Should error before calling to driver so don't care about actual data
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdBindIndexBuffer() offset (0x7) does not fall on ");
vkCmdBindIndexBuffer(m_commandBuffer->handle(), buffer, 7, VK_INDEX_TYPE_UINT16);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), memory, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
}
TEST_F(VkLayerTest, InvalidQueueFamilyIndex) {
// Create an out-of-range queueFamilyIndex
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buffCI.queueFamilyIndexCount = 2;
// Introduce failure by specifying invalid queue_family_index
uint32_t qfi[2];
qfi[0] = 777;
qfi[1] = 0;
buffCI.pQueueFamilyIndices = qfi;
buffCI.sharingMode = VK_SHARING_MODE_CONCURRENT; // qfi only matters in CONCURRENT mode
VkBuffer ib;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateBuffer: pCreateInfo->pQueueFamilyIndices[0] (= 777) is not one of the queue "
"families given via VkDeviceQueueCreateInfo structures when the device was created.");
vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib);
m_errorMonitor->VerifyFound();
if (m_device->queue_props.size() > 2) {
VkBuffer ib2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "which was not created allowing concurrent");
// Create buffer shared to queue families 1 and 2, but submitted on queue family 0
buffCI.queueFamilyIndexCount = 2;
qfi[0] = 1;
qfi[1] = 2;
vkCreateBuffer(m_device->device(), &buffCI, NULL, &ib2);
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), ib2, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = mem_reqs.size;
bool pass = false;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to allocate required memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), ib2, NULL);
return;
}
vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
vkBindBufferMemory(m_device->device(), ib2, mem, 0);
m_commandBuffer->begin();
vkCmdFillBuffer(m_commandBuffer->handle(), ib2, 0, 16, 5);
m_commandBuffer->end();
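// Submit without checking for success; submitting on queue family 0 should trigger the concurrent-usage error set above.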
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
vkDestroyBuffer(m_device->device(), ib2, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
}
}
TEST_F(VkLayerTest, ExecuteCommandsPrimaryCB) {
TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a primary command buffer (should only be secondary)");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// An empty primary command buffer
VkCommandBufferObj cb(m_device, m_commandPool);
cb.begin();
cb.end();
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &renderPassBeginInfo(), VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
VkCommandBuffer handle = cb.handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdExecuteCommands() called w/ Primary Cmd Buffer ");
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &handle);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetUnexpectedError("All elements of pCommandBuffers must not be in the pending state");
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, DSUsageBitsErrors) {
TEST_DESCRIPTION("Attempt to update descriptor sets for images and buffers that do not have correct usage bits sets.");
ASSERT_NO_FATAL_FAILURE(Init());
std::array<VkDescriptorPoolSize, VK_DESCRIPTOR_TYPE_RANGE_SIZE> ds_type_count;
for (uint32_t i = 0; i < ds_type_count.size(); ++i) {
ds_type_count[i].type = VkDescriptorType(i);
ds_type_count[i].descriptorCount = 1;
}
vk_testing::DescriptorPool ds_pool;
ds_pool.init(*m_device, vk_testing::DescriptorPool::create_info(0, VK_DESCRIPTOR_TYPE_RANGE_SIZE, ds_type_count));
ASSERT_TRUE(ds_pool.initialized());
std::vector<VkDescriptorSetLayoutBinding> dsl_bindings(1);
dsl_bindings[0].binding = 0;
dsl_bindings[0].descriptorType = VkDescriptorType(0);
dsl_bindings[0].descriptorCount = 1;
dsl_bindings[0].stageFlags = VK_SHADER_STAGE_ALL;
dsl_bindings[0].pImmutableSamplers = NULL;
// Create arrays of layout and descriptor objects
using UpDescriptorSet = std::unique_ptr<vk_testing::DescriptorSet>;
std::vector<UpDescriptorSet> descriptor_sets;
using UpDescriptorSetLayout = std::unique_ptr<VkDescriptorSetLayoutObj>;
std::vector<UpDescriptorSetLayout> ds_layouts;
descriptor_sets.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE);
ds_layouts.reserve(VK_DESCRIPTOR_TYPE_RANGE_SIZE);
for (uint32_t i = 0; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) {
dsl_bindings[0].descriptorType = VkDescriptorType(i);
ds_layouts.push_back(UpDescriptorSetLayout(new VkDescriptorSetLayoutObj(m_device, dsl_bindings)));
descriptor_sets.push_back(UpDescriptorSet(ds_pool.alloc_sets(*m_device, *ds_layouts.back())));
ASSERT_TRUE(descriptor_sets.back()->initialized());
}
// Create a buffer & bufferView to be used for invalid updates
const VkDeviceSize buffer_size = 256;
uint8_t data[buffer_size];
VkConstantBufferObj buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT);
VkConstantBufferObj storage_texel_buffer(m_device, buffer_size, data, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
ASSERT_TRUE(buffer.initialized() && storage_texel_buffer.initialized());
auto buff_view_ci = vk_testing::BufferView::createInfo(buffer.handle(), VK_FORMAT_R8_UNORM);
vk_testing::BufferView buffer_view_obj, storage_texel_buffer_view_obj;
buffer_view_obj.init(*m_device, buff_view_ci);
buff_view_ci.buffer = storage_texel_buffer.handle();
storage_texel_buffer_view_obj.init(*m_device, buff_view_ci);
ASSERT_TRUE(buffer_view_obj.initialized() && storage_texel_buffer_view_obj.initialized());
VkBufferView buffer_view = buffer_view_obj.handle();
VkBufferView storage_texel_buffer_view = storage_texel_buffer_view_obj.handle();
// Create an image to be used for invalid updates
VkImageObj image_obj(m_device);
image_obj.InitNoLayout(64, 64, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image_obj.initialized());
VkImageView image_view = image_obj.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkDescriptorBufferInfo buff_info = {};
buff_info.buffer = buffer.handle();
VkDescriptorImageInfo img_info = {};
img_info.imageView = image_view;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = &buffer_view;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = &img_info;
// These error messages align with VkDescriptorType struct
std::string error_codes[] = {
"VUID-VkWriteDescriptorSet-descriptorType-00326", // placeholder, no error for SAMPLER descriptor
"VUID-VkWriteDescriptorSet-descriptorType-00326", // COMBINED_IMAGE_SAMPLER
"VUID-VkWriteDescriptorSet-descriptorType-00326", // SAMPLED_IMAGE
"VUID-VkWriteDescriptorSet-descriptorType-00326", // STORAGE_IMAGE
"VUID-VkWriteDescriptorSet-descriptorType-00334", // UNIFORM_TEXEL_BUFFER
"VUID-VkWriteDescriptorSet-descriptorType-00335", // STORAGE_TEXEL_BUFFER
"VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER
"VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER
"VUID-VkWriteDescriptorSet-descriptorType-00330", // UNIFORM_BUFFER_DYNAMIC
"VUID-VkWriteDescriptorSet-descriptorType-00331", // STORAGE_BUFFER_DYNAMIC
"VUID-VkWriteDescriptorSet-descriptorType-00326" // INPUT_ATTACHMENT
};
// Start loop at 1 as SAMPLER desc type has no usage bit error
for (uint32_t i = 1; i < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++i) {
if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
// Now check for UNIFORM_TEXEL_BUFFER using storage_texel_buffer_view
descriptor_write.pTexelBufferView = &storage_texel_buffer_view;
}
descriptor_write.descriptorType = VkDescriptorType(i);
descriptor_write.dstSet = descriptor_sets[i]->handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_codes[i]);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
if (VkDescriptorType(i) == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
descriptor_write.pTexelBufferView = &buffer_view;
}
}
}
TEST_F(VkLayerTest, DSBufferInfoErrors) {
TEST_DESCRIPTION(
"Attempt to update buffer descriptor set that has incorrect parameters in VkDescriptorBufferInfo struct. This includes:\n"
"1. offset value greater than or equal to buffer size\n"
"2. range value of 0\n"
"3. range value greater than buffer (size - offset)");
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
// Create a buffer to be used for invalid updates
VkBufferCreateInfo buff_ci = {};
buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buff_ci.size = m_device->props.limits.minUniformBufferOffsetAlignment;
buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
// Have to bind memory to buffer before descriptor update
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = mem_reqs.size;
mem_alloc.memoryTypeIndex = 0;
bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
VkDescriptorBufferInfo buff_info = {};
buff_info.buffer = buffer;
// Cause error due to offset out of range
buff_info.offset = buff_ci.size;
buff_info.range = VK_WHOLE_SIZE;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = nullptr;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = nullptr;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.dstSet = ds.set_;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-offset-00340");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// Now cause error due to range of 0
buff_info.offset = 0;
buff_info.range = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-range-00341");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
// Now cause error due to range exceeding buffer size - offset
buff_info.offset = 0;
buff_info.range = buff_ci.size + 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorBufferInfo-range-00342");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
}
TEST_F(VkLayerTest, DSBufferLimitErrors) {
TEST_DESCRIPTION(
"Attempt to update buffer descriptor set that has VkDescriptorBufferInfo values that violate device limits.\n"
"Test cases include:\n"
"1. range of uniform buffer update exceeds maxUniformBufferRange\n"
"2. offset of uniform buffer update is not multiple of minUniformBufferOffsetAlignment\n"
"3. range of storage buffer update exceeds maxStorageBufferRange\n"
"4. offset of storage buffer update is not multiple of minStorageBufferOffsetAlignment");
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
struct TestCase {
VkDescriptorType descriptor_type;
VkBufferUsageFlagBits buffer_usage;
VkDeviceSize max_range;
std::string max_range_vu;
VkDeviceSize min_align;
std::string min_align_vu;
};
for (const auto &test_case : {
TestCase({VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
m_device->props.limits.maxUniformBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00332",
m_device->props.limits.minUniformBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00327"}),
TestCase({VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT,
m_device->props.limits.maxStorageBufferRange, "VUID-VkWriteDescriptorSet-descriptorType-00333",
m_device->props.limits.minStorageBufferOffsetAlignment, "VUID-VkWriteDescriptorSet-descriptorType-00328"}),
}) {
// Create layout with single buffer
OneOffDescriptorSet ds(m_device, {
{0, test_case.descriptor_type, 1, VK_SHADER_STAGE_ALL, nullptr},
});
// Create a buffer to be used for invalid updates
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bci.usage = test_case.buffer_usage;
bci.size = test_case.max_range + test_case.min_align; // Make buffer bigger than range limit
bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &bci, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
// Have to bind memory to buffer before descriptor update
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = mem_reqs.size;
bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
continue;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
if (VK_SUCCESS != err) {
printf("%s Failed to allocate memory in DSBufferLimitErrors; skipped.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
continue;
}
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
VkDescriptorBufferInfo buff_info = {};
buff_info.buffer = buffer;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = nullptr;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = nullptr;
descriptor_write.descriptorType = test_case.descriptor_type;
descriptor_write.dstSet = ds.set_;
// Exceed range limit
if (test_case.max_range != UINT32_MAX) {
buff_info.range = test_case.max_range + 1;
buff_info.offset = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.max_range_vu);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
}
// Reduce size of range to acceptable limit and cause offset error
if (test_case.min_align > 1) {
buff_info.range = test_case.max_range;
buff_info.offset = test_case.min_align - 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.min_align_vu);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
}
// Cleanup
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
}
}
TEST_F(VkLayerTest, DSAspectBitsErrors) {
// TODO : Initially only catching case where DEPTH & STENCIL aspect bits
// are set, but could expand this test to hit more cases.
TEST_DESCRIPTION("Attempt to update descriptor sets for images that do not have correct aspect bits sets.");
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_ALL, nullptr},
});
// Create an image to be used for invalid updates
VkImageObj image_obj(m_device);
image_obj.Init(64, 64, 1, depth_format, VK_IMAGE_USAGE_SAMPLED_BIT);
ASSERT_TRUE(image_obj.initialized());
VkImage image = image_obj.image();
// Now create view for image
VkImageViewCreateInfo image_view_ci = {};
image_view_ci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_ci.image = image;
image_view_ci.format = depth_format;
image_view_ci.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_ci.subresourceRange.layerCount = 1;
image_view_ci.subresourceRange.baseArrayLayer = 0;
image_view_ci.subresourceRange.levelCount = 1;
// Setting both depth & stencil aspect bits is illegal for an imageView used
// to populate a descriptor set.
image_view_ci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
VkImageView image_view;
err = vkCreateImageView(m_device->device(), &image_view_ci, NULL, &image_view);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo img_info = {};
img_info.imageView = image_view;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = NULL;
descriptor_write.pBufferInfo = NULL;
descriptor_write.pImageInfo = &img_info;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
descriptor_write.dstSet = ds.set_;
// TODO(whenning42): Update this check to look for a VUID when this error is
// assigned one.
const char *error_msg = " please only set either VK_IMAGE_ASPECT_DEPTH_BIT or VK_IMAGE_ASPECT_STENCIL_BIT ";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error_msg);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroyImageView(m_device->device(), image_view, NULL);
}
TEST_F(VkLayerTest, DSTypeMismatch) {
// Create DS w/ layout of one type and attempt Update w/ mis-matched type
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
" binding #0 with type VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER but update type is VK_DESCRIPTOR_TYPE_SAMPLER");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo info = {};
info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.descriptorCount = 1;
// This is a mismatched type for the layout which expects BUFFER
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkLayerTest, DSUpdateOutOfBounds) {
// For an overlapping update, have dstArrayElement exceed the binding's descriptorCount in the layout
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstArrayElement-00321");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
if (!buffer_test.GetBufferCurrent()) {
// Something prevented creation of buffer so abort
printf("%s Buffer creation failed, skipping test\n", kSkipPrefix);
return;
}
// Correctly update descriptor to avoid "NOT_UPDATED" error
VkDescriptorBufferInfo buff_info = {};
buff_info.buffer = buffer_test.GetBuffer();
buff_info.offset = 0;
buff_info.range = 1024;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstArrayElement = 1; /* This index out of bounds for the update */
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.pBufferInfo = &buff_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidDSUpdateIndex) {
// Create layout w/ count of 1 and attempt update to that layout w/ binding index 2
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00315");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo info = {};
info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 2;
descriptor_write.descriptorCount = 1;
// This is the wrong type, but out of bounds will be flagged first
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkLayerTest, DSUpdateEmptyBinding) {
// Create layout w/ empty binding and attempt to update it
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLER, 0 /* !! */, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo info = {};
info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1; // Lie here to avoid parameter_validation error
// This is the wrong type, but empty binding error will be flagged first
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &info;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-dstBinding-00316");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkLayerTest, InvalidDSUpdateStruct) {
// Call vkUpdateDescriptorSets() with a struct type other than the valid
// VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, ".sType must be VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo info = {};
info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; /* Intentionally broken struct type */
descriptor_write.dstSet = ds.set_;
descriptor_write.descriptorCount = 1;
// This is the wrong type, but out of bounds will be flagged first
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkLayerTest, SampleDescriptorUpdateError) {
// Create a single Sampler descriptor and send it an invalid Sampler
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00325");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSampler sampler = (VkSampler)((size_t)0xbaadbeef); // Sampler with invalid handle
VkDescriptorImageInfo descriptor_info;
memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo));
descriptor_info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &descriptor_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ImageViewDescriptorUpdateError) {
// Create a single combined Image/Sampler descriptor and send it an invalid
// imageView
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkWriteDescriptorSet-descriptorType-00326");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkImageView view = (VkImageView)((size_t)0xbaadbeef); // invalid imageView object
VkDescriptorImageInfo descriptor_info;
memset(&descriptor_info, 0, sizeof(VkDescriptorImageInfo));
descriptor_info.sampler = sampler;
descriptor_info.imageView = view;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &descriptor_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkLayerTest, CopyDescriptorUpdateErrors) {
// Create DS w/ layout of 2 types, write update 1 and attempt to copy-update
// into the other
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" binding #1 with type VK_DESCRIPTOR_TYPE_SAMPLER. Types do not match.");
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo info = {};
info.sampler = sampler;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(VkWriteDescriptorSet));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 1; // SAMPLER binding from layout above
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
descriptor_write.pImageInfo = &info;
// This write update should succeed
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Now perform a copy update that fails due to type mismatch
VkCopyDescriptorSet copy_ds_update;
memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet));
copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy_ds_update.srcSet = ds.set_;
copy_ds_update.srcBinding = 1; // Copy from SAMPLER binding
copy_ds_update.dstSet = ds.set_;
copy_ds_update.dstBinding = 0; // ERROR : copy to UNIFORM binding
copy_ds_update.descriptorCount = 1; // copy 1 descriptor
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update);
m_errorMonitor->VerifyFound();
// Now perform a copy update that fails due to binding out of bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, " does not have copy update src binding of 3.");
memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet));
copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy_ds_update.srcSet = ds.set_;
copy_ds_update.srcBinding = 3; // ERROR : Invalid binding for matching layout
copy_ds_update.dstSet = ds.set_;
copy_ds_update.dstBinding = 0;
copy_ds_update.descriptorCount = 1; // Copy 1 descriptor
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update);
m_errorMonitor->VerifyFound();
// Now perform a copy update that fails because the descriptor count oversteps the destination set
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" binding#1 with offset index of 1 plus update array offset of 0 and update of 5 "
"descriptors oversteps total number of descriptors in set: 2.");
memset(&copy_ds_update, 0, sizeof(VkCopyDescriptorSet));
copy_ds_update.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy_ds_update.srcSet = ds.set_;
copy_ds_update.srcBinding = 1;
copy_ds_update.dstSet = ds.set_;
copy_ds_update.dstBinding = 0;
copy_ds_update.descriptorCount = 5; // ERROR copy 5 descriptors (out of bounds for layout)
vkUpdateDescriptorSets(m_device->device(), 0, NULL, 1, &copy_ds_update);
m_errorMonitor->VerifyFound();
vkDestroySampler(m_device->device(), sampler, NULL);
}
TEST_F(VkPositiveLayerTest, CopyNonupdatedDescriptors) {
TEST_DESCRIPTION("Copy non-updated descriptors");
unsigned int i;
ASSERT_NO_FATAL_FAILURE(Init());
OneOffDescriptorSet src_ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
{2, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
OneOffDescriptorSet dst_ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
});
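// None of the source bindings have been written yet. Copying never-updated descriptors between bindings
// of matching types is legal, so the copies below are expected to produce no validation errors.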
m_errorMonitor->ExpectSuccess();
const unsigned int copy_size = 2;
VkCopyDescriptorSet copy_ds_update[copy_size];
memset(copy_ds_update, 0, sizeof(copy_ds_update));
for (i = 0; i < copy_size; i++) {
copy_ds_update[i].sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy_ds_update[i].srcSet = src_ds.set_;
copy_ds_update[i].srcBinding = i;
copy_ds_update[i].dstSet = dst_ds.set_;
copy_ds_update[i].dstBinding = i;
copy_ds_update[i].descriptorCount = 1;
}
vkUpdateDescriptorSets(m_device->device(), 0, NULL, copy_size, copy_ds_update);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, NumSamplesMismatch) {
// Create CommandBuffer where the pipeline's MSAA sample count doesn't match the RenderPass sampleCount
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Num samples mismatch! ");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
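// Request 4x MSAA in the pipeline below; the render target set up by InitRenderTarget() uses a single
// sample, so the sample counts will not match when the draw is validated.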
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = NULL;
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader
// but add it to be able to run on more devices
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.SetMSAA(&pipe_ms_state_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// Render triangle (the error should trigger on the attempt to draw).
m_commandBuffer->Draw(3, 1, 0, 0);
// Finalize recording of the command buffer
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, RenderPassIncompatible) {
TEST_DESCRIPTION(
"Hit RenderPass incompatible cases. Initial case is drawing with an active renderpass that's not compatible with the bound "
"pipeline state object's creation renderpass");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader
// but add it to be able to run on more devices
// Create a renderpass that will be incompatible with the default renderpass
VkAttachmentReference color_att = {};
color_att.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &color_att;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
// Format incompatible with PSO RP color attach format B8G8R8A8_UNORM
attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
m_viewports.push_back(viewport);
pipe.SetViewport(m_viewports);
VkRect2D rect = {{0, 0}, {64, 64}};
m_scissors.push_back(rect);
pipe.SetScissor(m_scissors);
pipe.CreateVKPipeline(pipeline_layout.handle(), rp);
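// The pipeline above was created against the incompatible render pass 'rp', but the draw below is recorded
// inside the framework's default render pass, which should trigger the renderpass-compatibility error.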
VkCommandBufferInheritanceInfo cbii = {};
cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
cbii.renderPass = rp;
cbii.subpass = 0;
VkCommandBufferBeginInfo cbbi = {};
cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cbbi.pInheritanceInfo = &cbii;
vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi);
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-renderPass-00435");
// Render triangle (the error should trigger on the attempt to draw).
m_commandBuffer->Draw(3, 1, 0, 0);
// Finalize recording of the command buffer
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkLayerTest, NumBlendAttachMismatch) {
// Create Pipeline where the number of blend attachments doesn't match the
// number of color attachments. In this case, we don't add any color
// blend attachments even though we have a color attachment.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-attachmentCount-00746");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = NULL;
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader
// but add it to be able to run on more devices
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.SetMSAA(&pipe_ms_state_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Maint1BindingSliceOf3DImage) {
TEST_DESCRIPTION(
"Attempt to bind a slice of a 3D texture in a descriptor set. This is explicitly disallowed by KHR_maintenance1 to keep "
"things simple for drivers.");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
} else {
printf("%s %s is not supported; skipping\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkResult err;
OneOffDescriptorSet set(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkImageCreateInfo ici = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR,
VK_IMAGE_TYPE_3D,
VK_FORMAT_R8G8B8A8_UNORM,
{32, 32, 32},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_SAMPLED_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image(m_device);
image.init(&ici);
ASSERT_TRUE(image.initialized());
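// Create a 2D view of a single slice of the 3D image. Creating the view is permitted because the image was
// created with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR; it is using that view in a descriptor set that
// this test expects to be rejected.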
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image.handle(),
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_R8G8B8A8_UNORM,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
// Meat of the test.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorImageInfo-imageView-00343");
VkDescriptorImageInfo dii = {VK_NULL_HANDLE, view, VK_IMAGE_LAYOUT_GENERAL};
VkWriteDescriptorSet write = {VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, nullptr, set.set_, 0, 0, 1,
VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, &dii, nullptr, nullptr};
vkUpdateDescriptorSets(m_device->device(), 1, &write, 0, nullptr);
m_errorMonitor->VerifyFound();
vkDestroyImageView(m_device->device(), view, nullptr);
}
TEST_F(VkLayerTest, MissingClearAttachment) {
TEST_DESCRIPTION("Points to a wrong colorAttachment index in a VkClearAttachment structure passed to vkCmdClearAttachments");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-aspectMask-00015");
VKTriangleTest(BsoFailCmdClearAttachments);
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, ConfirmNoVLErrorWhenVkCmdClearAttachmentsCalledInSecondaryCB) {
TEST_DESCRIPTION(
"This test is to verify that when vkCmdClearAttachments is called by a secondary commandbuffer, the validation layers do "
"not throw an error if the primary commandbuffer begins a renderpass before executing the secondary commandbuffer.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkCommandBufferBeginInfo info = {};
VkCommandBufferInheritanceInfo hinfo = {};
info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
info.pInheritanceInfo = &hinfo;
hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
hinfo.pNext = NULL;
hinfo.renderPass = renderPass();
hinfo.subpass = 0;
hinfo.framebuffer = m_framebuffer;
hinfo.occlusionQueryEnable = VK_FALSE;
hinfo.queryFlags = 0;
hinfo.pipelineStatistics = 0;
secondary.begin(&info);
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 0.0;
color_attachment.clearValue.color.float32[1] = 0.0;
color_attachment.clearValue.color.float32[2] = 0.0;
color_attachment.clearValue.color.float32[3] = 0.0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};
vkCmdClearAttachments(secondary.handle(), 1, &color_attachment, 1, &clear_rect);
secondary.end();
// Modify the local clear rect after recording; the clear was already recorded into the secondary command
// buffer, so this must not cause a validation error
clear_rect = {{{0, 0}, {99999999, 99999999}}, 0, 0};
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, CmdClearAttachmentTests) {
TEST_DESCRIPTION("Various tests for validating usage of vkCmdClearAttachments");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = NULL;
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
// We shouldn't need a fragment shader but add it to be able to run
// on more devices
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.SetMSAA(&pipe_ms_state_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Main thing we care about for this test is that the VkImage obj we're
// clearing matches Color Attachment of FB
// Also pass down other dummy params to keep driver and paramchecker happy
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 1.0;
color_attachment.clearValue.color.float32[1] = 1.0;
color_attachment.clearValue.color.float32[2] = 1.0;
color_attachment.clearValue.color.float32[3] = 1.0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};
// Call for full-sized FB Color attachment prior to issuing a Draw
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"vkCmdClearAttachments() issued on command buffer object ");
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
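// Next, widen the clear rect beyond the render area; clear rects must be fully contained within it.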
clear_rect.rect.extent.width = renderPassBeginInfo().renderArea.extent.width + 4;
clear_rect.rect.extent.height = clear_rect.rect.extent.height / 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016");
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
// baseLayer >= view layers
clear_rect.rect.extent.width = (uint32_t)m_width;
clear_rect.baseArrayLayer = 1;
clear_rect.layerCount = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017");
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
// baseLayer + layerCount > view layers
clear_rect.rect.extent.width = (uint32_t)m_width;
clear_rect.baseArrayLayer = 0;
clear_rect.layerCount = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00017");
vkCmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, VtxBufferBadIndex) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"but no vertex buffers are attached to this Pipeline State Object");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = NULL;
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this); // We shouldn't need a fragment shader
// but add it to be able to run on more devices
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.SetMSAA(&pipe_ms_state_ci);
pipe.SetViewport(m_viewports);
pipe.SetScissor(m_scissors);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// Don't care about actual data, just need to get to draw to flag error
static const float vbo_data[3] = {1.f, 0.f, 1.f};
VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)0, 1); // VBO idx 1, but no VBO in PSO
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, MismatchCountQueueCreateRequestedFeature) {
TEST_DESCRIPTION("Use an invalid count in a vkEnumeratePhysicalDevices call.Use invalid Queue Family Index in vkCreateDevice");
ASSERT_NO_FATAL_FAILURE(Init());
// The following test fails with recent NVIDIA drivers.
// By the time core_validation is reached, the NVIDIA
// driver has sanitized the invalid condition and core_validation
// never sees the failure condition. This is not the case
// with AMD and Mesa drivers. Further investigation is required.
// uint32_t count = static_cast<uint32_t>(~0);
// VkPhysicalDevice physical_device;
// vkEnumeratePhysicalDevices(instance(), &count, &physical_device);
// m_errorMonitor->VerifyFound();
float queue_priority = 0.0;
VkDeviceQueueCreateInfo queue_create_info = {};
queue_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_create_info.queueCount = 1;
queue_create_info.pQueuePriorities = &queue_priority;
queue_create_info.queueFamilyIndex = static_cast<uint32_t>(~0);
VkPhysicalDeviceFeatures features = m_device->phy().features();
VkDevice testDevice;
VkDeviceCreateInfo device_create_info = {};
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.queueCreateInfoCount = 1;
device_create_info.pQueueCreateInfos = &queue_create_info;
device_create_info.pEnabledFeatures = &features;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceQueueCreateInfo-queueFamilyIndex-00381");
// The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
// not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
vkCreateDevice(gpu(), &device_create_info, nullptr, &testDevice);
m_errorMonitor->VerifyFound();
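// Rebuild the queue create infos with valid family indices before exercising the unsupported-feature case below.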
vk_testing::QueueCreateInfoArray queue_info_obj(m_device->queue_props);
device_create_info.queueCreateInfoCount = queue_info_obj.size();
device_create_info.pQueueCreateInfos = queue_info_obj.data();
unsigned feature_count = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
VkBool32 *feature_array = reinterpret_cast<VkBool32 *>(&features);
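// Find the first feature this physical device does not support and request it anyway; device creation should
// then fail the feature-availability check.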
for (unsigned i = 0; i < feature_count; i++) {
if (VK_FALSE == feature_array[i]) {
feature_array[i] = VK_TRUE;
device_create_info.pEnabledFeatures = &features;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"While calling vkCreateDevice(), requesting feature");
// The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms
// that do not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"You requested features that are unavailable on this device. You should first "
"query feature availability by calling vkGetPhysicalDeviceFeatures().");
vkCreateDevice(gpu(), &device_create_info, nullptr, &testDevice);
m_errorMonitor->VerifyFound();
break;
}
}
}
TEST_F(VkLayerTest, InvalidQueryPoolCreate) {
TEST_DESCRIPTION("Attempt to create a query pool for PIPELINE_STATISTICS without enabling pipeline stats for the device.");
ASSERT_NO_FATAL_FAILURE(Init());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
VkDevice local_device;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
// Intentionally disable pipeline stats
features.pipelineStatisticsQuery = VK_FALSE;
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.pEnabledFeatures = &features;
VkResult err = vkCreateDevice(gpu(), &device_create_info, nullptr, &local_device);
ASSERT_VK_SUCCESS(err);
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
qpci.queryCount = 1;
VkQueryPool query_pool;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkQueryPoolCreateInfo-queryType-00791");
vkCreateQueryPool(local_device, &qpci, nullptr, &query_pool);
m_errorMonitor->VerifyFound();
vkDestroyDevice(local_device, nullptr);
}
TEST_F(VkLayerTest, UnclosedQuery) {
TEST_DESCRIPTION("End a command buffer with a query still in progress.");
const char *invalid_query = "Ending command buffer with in progress query: queryPool 0x";
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_query);
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info = {};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_OCCLUSION;
query_pool_create_info.queryCount = 1;
vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0 /*startQuery*/, 1 /*queryCount*/);
vkCmdBeginQuery(m_commandBuffer->handle(), query_pool, 0, 0);
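// End the command buffer without calling vkCmdEndQuery; the query begun above is still active and should be
// flagged.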
vkEndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
vkDestroyQueryPool(m_device->device(), query_pool, nullptr);
vkDestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, VertexBufferInvalid) {
TEST_DESCRIPTION(
"Submit a command buffer using deleted vertex buffer, delete a buffer twice, use an invalid offset for each buffer type, "
"and attempt to bind a null buffer");
const char *deleted_buffer_in_command_buffer = "Cannot submit cmd buffer using deleted buffer ";
const char *invalid_offset_message = "vkBindBufferMemory(): memoryOffset is 0x";
const char *invalid_storage_buffer_offset_message = "vkBindBufferMemory(): storage memoryOffset is 0x";
const char *invalid_texel_buffer_offset_message = "vkBindBufferMemory(): texel memoryOffset is 0x";
const char *invalid_uniform_buffer_offset_message = "vkBindBufferMemory(): uniform memoryOffset is 0x";
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = {};
pipe_ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
pipe_ms_state_ci.pNext = NULL;
pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
pipe_ms_state_ci.sampleShadingEnable = 0;
pipe_ms_state_ci.minSampleShading = 1.0;
pipe_ms_state_ci.pSampleMask = nullptr;
const VkPipelineLayoutObj pipeline_layout(m_device);
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.SetMSAA(&pipe_ms_state_ci);
pipe.SetViewport(m_viewports);
pipe.SetScissor(m_scissors);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
{
// Create and bind a vertex buffer in a reduced scope, which will cause
// it to be deleted upon leaving this scope
const float vbo_data[3] = {1.f, 0.f, 1.f};
VkVerticesObj draw_vertices(m_device, 1, 1, sizeof(vbo_data[0]), sizeof(vbo_data) / sizeof(vbo_data[0]), vbo_data);
draw_vertices.BindVertexBuffers(m_commandBuffer->handle());
draw_vertices.AddVertexInputToPipe(pipe);
}
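// The VkVerticesObj destructor has destroyed the vertex buffer here, but the bind was already recorded, so
// submitting this command buffer should be flagged as using a deleted buffer.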
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, deleted_buffer_in_command_buffer);
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyFound();
{
// Create and bind a vertex buffer in a reduced scope, and delete it
// twice, the second through the destructor
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eDoubleDelete);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBuffer-buffer-parameter");
buffer_test.TestDoubleDestroy();
}
m_errorMonitor->VerifyFound();
m_errorMonitor->SetUnexpectedError("value of pCreateInfo->usage must not be 0");
if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidMemoryOffset)) {
// Create and bind a memory buffer with an invalid offset.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_offset_message);
m_errorMonitor->SetUnexpectedError(
"If buffer was created with the VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT or VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "
"memoryOffset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment");
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidMemoryOffset);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) {
// Create and bind a memory buffer with an invalid offset again,
// but look for a texel buffer message.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_texel_buffer_offset_message);
m_errorMonitor->SetUnexpectedError(
"memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from "
"a call to vkGetBufferMemoryRequirements with buffer");
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) {
// Create and bind a memory buffer with an invalid offset again, but
// look for a uniform buffer message.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_uniform_buffer_offset_message);
m_errorMonitor->SetUnexpectedError(
"memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from "
"a call to vkGetBufferMemoryRequirements with buffer");
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
if (VkBufferTest::GetTestConditionValid(m_device, VkBufferTest::eInvalidDeviceOffset, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) {
// Create and bind a memory buffer with an invalid offset again, but
// look for a storage buffer message.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, invalid_storage_buffer_offset_message);
m_errorMonitor->SetUnexpectedError(
"memoryOffset must be an integer multiple of the alignment member of the VkMemoryRequirements structure returned from "
"a call to vkGetBufferMemoryRequirements with buffer");
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eInvalidDeviceOffset);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
{
// Attempt to bind a null buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkBindBufferMemory: required parameter buffer specified as VK_NULL_HANDLE");
VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindNullBuffer);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
{
// Attempt to bind a fake buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-buffer-parameter");
VkBufferTest buffer_test(m_device, 0, VkBufferTest::eBindFakeBuffer);
(void)buffer_test;
m_errorMonitor->VerifyFound();
}
{
// Attempt to use an invalid handle to delete a buffer.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkFreeMemory-memory-parameter");
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, VkBufferTest::eFreeInvalidHandle);
(void)buffer_test;
}
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, BadVertexBufferOffset) {
TEST_DESCRIPTION("Submit an offset past the end of a vertex buffer");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
static const float vbo_data[3] = {1.f, 0.f, 1.f};
VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindVertexBuffers-pOffsets-00626");
m_commandBuffer->BindVertexBuffer(&vbo, (VkDeviceSize)(3 * sizeof(float)), 1); // Offset at the end of the buffer
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
// INVALID_IMAGE_LAYOUT tests (one other case is hit by MapMemWithoutHostVisibleBit and not here)
TEST_F(VkLayerTest, InvalidImageLayout) {
TEST_DESCRIPTION(
"Hit all possible validation checks associated with the DRAWSTATE_INVALID_IMAGE_LAYOUT enum. Generally these involve "
"having images in the wrong layout when they're copied or transitioned.");
// 3 in ValidateCmdBufImageLayouts
// * -1 Attempt to submit cmd buf w/ deleted image
// * -2 Cmd buf submit of image w/ layout not matching first use w/ subresource
// * -3 Cmd buf submit of image w/ layout not matching first use w/o subresource
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
// Create src & dst images to use for copy operations
VkImage src_image;
VkImage dst_image;
VkImage depth_image;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 4;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_create_info.flags = 0;
VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &src_image);
ASSERT_VK_SUCCESS(err);
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dst_image);
ASSERT_VK_SUCCESS(err);
image_create_info.format = VK_FORMAT_D16_UNORM;
image_create_info.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &depth_image);
ASSERT_VK_SUCCESS(err);
// Allocate memory
VkMemoryRequirements img_mem_reqs = {};
VkMemoryAllocateInfo mem_alloc = {};
VkDeviceMemory src_image_mem, dst_image_mem, depth_image_mem;
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), src_image, &img_mem_reqs);
mem_alloc.allocationSize = img_mem_reqs.size;
bool pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &src_image_mem);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), dst_image, &img_mem_reqs);
mem_alloc.allocationSize = img_mem_reqs.size;
pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &dst_image_mem);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), depth_image, &img_mem_reqs);
mem_alloc.allocationSize = img_mem_reqs.size;
pass = m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &mem_alloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &depth_image_mem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), src_image, src_image_mem, 0);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), dst_image, dst_image_mem, 0);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), depth_image, depth_image_mem, 0);
ASSERT_VK_SUCCESS(err);
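// All three images are still in VK_IMAGE_LAYOUT_UNDEFINED at this point; the copy, clear, and barrier calls
// below deliberately pass layouts that are either disallowed or do not match that actual layout.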
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.srcOffset.x = 0;
copy_region.srcOffset.y = 0;
copy_region.srcOffset.z = 0;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.mipLevel = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.layerCount = 1;
copy_region.dstOffset.x = 0;
copy_region.dstOffset.y = 0;
copy_region.dstOffset.z = 0;
copy_region.extent.width = 1;
copy_region.extent.height = 1;
copy_region.extent.depth = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
// The first call hits the expected WARNING and skips the call down the chain, so call a second time to call down chain and
// update layer state
m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
// Now cause error due to src image layout changing
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00128");
m_errorMonitor->SetUnexpectedError("is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
// Final src error is due to bad layout type
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImageLayout-00129");
m_errorMonitor->SetUnexpectedError(
"with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_UNDEFINED, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
// Now verify same checks for dst
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"layout should be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL instead of GENERAL.");
m_errorMonitor->SetUnexpectedError("layout should be VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL instead of GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
// Now cause error due to dst image layout changing
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00133");
m_errorMonitor->SetUnexpectedError(
"is VK_IMAGE_LAYOUT_UNDEFINED but can only be VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL or VK_IMAGE_LAYOUT_GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstImageLayout-00134");
m_errorMonitor->SetUnexpectedError(
"with specific layout VK_IMAGE_LAYOUT_UNDEFINED that doesn't match the actual current layout VK_IMAGE_LAYOUT_GENERAL.");
m_commandBuffer->CopyImage(src_image, VK_IMAGE_LAYOUT_GENERAL, dst_image, VK_IMAGE_LAYOUT_UNDEFINED, 1, &copy_region);
m_errorMonitor->VerifyFound();
// Convert dst and depth images to TRANSFER_DST for subsequent tests
VkImageMemoryBarrier transfer_dst_image_barrier[1] = {};
transfer_dst_image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
transfer_dst_image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
transfer_dst_image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
transfer_dst_image_barrier[0].srcAccessMask = 0;
transfer_dst_image_barrier[0].dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
transfer_dst_image_barrier[0].image = dst_image;
transfer_dst_image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers;
transfer_dst_image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels;
transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
NULL, 0, NULL, 1, transfer_dst_image_barrier);
transfer_dst_image_barrier[0].image = depth_image;
transfer_dst_image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
NULL, 0, NULL, 1, transfer_dst_image_barrier);
// Cause errors due to clearing with invalid image layouts
VkClearColorValue color_clear_value = {};
VkImageSubresourceRange clear_range;
clear_range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
clear_range.baseMipLevel = 0;
clear_range.baseArrayLayer = 0;
clear_range.layerCount = 1;
clear_range.levelCount = 1;
// Fail due to explicitly prohibited layout for color clear (only GENERAL and TRANSFER_DST are permitted).
// Since the image is currently not in UNDEFINED layout, this will emit two errors.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00005");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004");
m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_UNDEFINED, &color_clear_value, 1, &clear_range);
m_errorMonitor->VerifyFound();
// Fail due to provided layout not matching actual current layout for color clear.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearColorImage-imageLayout-00004");
m_commandBuffer->ClearColorImage(dst_image, VK_IMAGE_LAYOUT_GENERAL, &color_clear_value, 1, &clear_range);
m_errorMonitor->VerifyFound();
VkClearDepthStencilValue depth_clear_value = {};
clear_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
// Fail due to explicitly prohibited layout for depth clear (only GENERAL and TRANSFER_DST are permitted).
// Since the image is currently not in UNDEFINED layout, this will emit two errors.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00012");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011");
m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_UNDEFINED, &depth_clear_value, 1, &clear_range);
m_errorMonitor->VerifyFound();
// Fail due to provided layout not matching actual current layout for depth clear.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearDepthStencilImage-imageLayout-00011");
m_commandBuffer->ClearDepthStencilImage(depth_image, VK_IMAGE_LAYOUT_GENERAL, &depth_clear_value, 1, &clear_range);
m_errorMonitor->VerifyFound();
// Now cause error due to bad image layout transition in PipelineBarrier
VkImageMemoryBarrier image_barrier[1] = {};
image_barrier[0].sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
image_barrier[0].oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
image_barrier[0].newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
image_barrier[0].image = src_image;
image_barrier[0].subresourceRange.layerCount = image_create_info.arrayLayers;
image_barrier[0].subresourceRange.levelCount = image_create_info.mipLevels;
image_barrier[0].subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01197");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageMemoryBarrier-oldLayout-01210");
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
NULL, 0, NULL, 1, image_barrier);
m_errorMonitor->VerifyFound();
// Finally some layout errors at RenderPass create time
// Just hacking in specific state to get to the errors we want so don't copy this unless you know what you're doing.
VkAttachmentReference attach = {};
// perf warning for GENERAL layout w/ non-DS input attachment
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
VkSubpassDescription subpass = {};
subpass.inputAttachmentCount = 1;
subpass.pInputAttachments = &attach;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_UNDEFINED;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
// error w/ non-general layout
attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Layout for input attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be READ_ONLY_OPTIMAL or GENERAL.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
subpass.inputAttachmentCount = 0;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &attach;
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
// perf warning for GENERAL layout on color attachment
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
// error w/ non-color opt or GENERAL layout for color attachment
attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Layout for color attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
subpass.colorAttachmentCount = 0;
subpass.pDepthStencilAttachment = &attach;
attach.layout = VK_IMAGE_LAYOUT_GENERAL;
// perf warning for GENERAL layout on DS attachment
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
"GENERAL layout for depth attachment may not give optimal performance.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
// error w/ non-ds opt or GENERAL layout for depth/stencil attachment
attach.layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Layout for depth attachment is VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL but can only be "
"DEPTH_STENCIL_ATTACHMENT_OPTIMAL, DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
// For this error we need a valid renderpass, so create a default one
attach.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
attach.attachment = 0;
attach_desc.format = depth_format;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
// Can't do a CLEAR load on READ_ONLY initialLayout
attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"with invalid first layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
vkFreeMemory(m_device->device(), src_image_mem, NULL);
vkFreeMemory(m_device->device(), dst_image_mem, NULL);
vkFreeMemory(m_device->device(), depth_image_mem, NULL);
vkDestroyImage(m_device->device(), src_image, NULL);
vkDestroyImage(m_device->device(), dst_image, NULL);
vkDestroyImage(m_device->device(), depth_image, NULL);
}
TEST_F(VkLayerTest, InvalidStorageImageLayout) {
TEST_DESCRIPTION("Attempt to update a STORAGE_IMAGE descriptor w/o GENERAL layout.");
ASSERT_NO_FATAL_FAILURE(Init());
const VkFormat tex_format = VK_FORMAT_R8G8B8A8_UNORM;
VkImageTiling tiling;
VkFormatProperties format_properties;
vkGetPhysicalDeviceFormatProperties(gpu(), tex_format, &format_properties);
if (format_properties.linearTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) {
tiling = VK_IMAGE_TILING_LINEAR;
} else if (format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) {
tiling = VK_IMAGE_TILING_OPTIMAL;
} else {
printf("%s Device does not support VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT; skipped.\n", kSkipPrefix);
return;
}
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkImageObj image(m_device);
image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_STORAGE_BIT, tiling, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(tex_format);
VkDescriptorImageInfo image_info = {};
image_info.imageView = view;
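// STORAGE_IMAGE descriptors must use VK_IMAGE_LAYOUT_GENERAL; any other layout should be flagged at update time.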
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
descriptor_write.pImageInfo = &image_info;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout "
"VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL but according to spec ");
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, NonSimultaneousSecondaryMarksPrimary) {
ASSERT_NO_FATAL_FAILURE(Init());
const char *simultaneous_use_message =
"does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer";
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
secondary.begin();
secondary.end();
VkCommandBufferBeginInfo cbbi = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
nullptr,
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
nullptr,
};
m_commandBuffer->begin(&cbbi);
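// The primary was begun with SIMULTANEOUS_USE but the secondary was not, so executing the secondary
// restricts the primary and should emit the simultaneous-use warning.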
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, simultaneous_use_message);
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, SimultaneousUseSecondaryTwoExecutes) {
ASSERT_NO_FATAL_FAILURE(Init());
const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!";
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkCommandBufferInheritanceInfo inh = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
nullptr,
};
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh};
secondary.begin(&cbbi);
secondary.end();
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
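// The first execute is fine; executing the same secondary (recorded without SIMULTANEOUS_USE) a second
// time in this primary is what should be flagged.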
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, SimultaneousUseSecondarySingleExecute) {
ASSERT_NO_FATAL_FAILURE(Init());
// Variation on the previous test: execute the same CB twice within a single
// vkCmdExecuteCommands call
const char *simultaneous_use_message = "without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set!";
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkCommandBufferInheritanceInfo inh = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
nullptr,
};
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, &inh};
secondary.begin(&cbbi);
secondary.end();
m_commandBuffer->begin();
VkCommandBuffer cbs[] = {secondary.handle(), secondary.handle()};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
vkCmdExecuteCommands(m_commandBuffer->handle(), 2, cbs);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, SimultaneousUseOneShot) {
TEST_DESCRIPTION("Submit the same command buffer twice in one submit looking for simultaneous use and one time submit errors");
const char *simultaneous_use_message = "is already in use and is not marked for simultaneous use";
const char *one_shot_message = "VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT set, but has been submitted";
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandBuffer cmd_bufs[2];
VkCommandBufferAllocateInfo alloc_info;
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.commandBufferCount = 2;
alloc_info.commandPool = m_commandPool->handle();
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs);
VkCommandBufferBeginInfo cb_binfo;
cb_binfo.pNext = NULL;
cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_binfo.pInheritanceInfo = nullptr;
cb_binfo.flags = 0;
vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(cmd_bufs[0], 0, 1, &viewport);
vkEndCommandBuffer(cmd_bufs[0]);
VkCommandBuffer duplicates[2] = {cmd_bufs[0], cmd_bufs[0]};
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 2;
submit_info.pCommandBuffers = duplicates;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, simultaneous_use_message);
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Set one time use and now look for one time submit
duplicates[0] = duplicates[1] = cmd_bufs[1];
cb_binfo.flags = VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT | VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo);
vkCmdSetViewport(cmd_bufs[1], 0, 1, &viewport);
vkEndCommandBuffer(cmd_bufs[1]);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, one_shot_message);
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
}
TEST_F(VkLayerTest, StageMaskGsTsEnabled) {
TEST_DESCRIPTION(
"Attempt to use a stageMask w/ geometry shader and tesselation shader bits enabled when those features are disabled on the "
"device.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Make sure gs & ts are disabled
features.geometryShader = false;
features.tessellationShader = false;
// The sacrificial device object, created with gs & ts disabled so the stageMask checks below will fire
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = test_device.graphics_queue_node_index_;
VkCommandPool command_pool;
vkCreateCommandPool(test_device.handle(), &pool_create_info, nullptr, &command_pool);
VkCommandBufferAllocateInfo cmd = {};
cmd.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd.pNext = NULL;
cmd.commandPool = command_pool;
cmd.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd.commandBufferCount = 1;
VkCommandBuffer cmd_buffer;
VkResult err = vkAllocateCommandBuffers(test_device.handle(), &cmd, &cmd_buffer);
ASSERT_VK_SUCCESS(err);
VkEvent event;
VkEventCreateInfo evci = {};
evci.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
VkResult result = vkCreateEvent(test_device.handle(), &evci, NULL, &event);
ASSERT_VK_SUCCESS(result);
VkCommandBufferBeginInfo cbbi = {};
cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(cmd_buffer, &cbbi);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01150");
vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetEvent-stageMask-01151");
vkCmdSetEvent(cmd_buffer, event, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT);
m_errorMonitor->VerifyFound();
vkDestroyEvent(test_device.handle(), event, NULL);
vkDestroyCommandPool(test_device.handle(), command_pool, NULL);
}
TEST_F(VkLayerTest, EventInUseDestroyedSignaled) {
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
m_commandBuffer->end();
vkDestroyEvent(m_device->device(), event, nullptr);
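// The event was recorded into the command buffer and then destroyed before submission, so submitting
// that command buffer should be flagged as referencing an invalid (destroyed) event.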
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is invalid because bound");
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InUseDestroyedSignaled) {
TEST_DESCRIPTION(
"Use vkCmdExecuteCommands with invalid state in primary and secondary command buffers. Delete objects that are in use. "
"Call VkQueueSubmit with an event that has been deleted.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->ExpectSuccess();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkBufferTest buffer_test(m_device, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
VkDescriptorBufferInfo buffer_info = {};
buffer_info.buffer = buffer_test.GetBuffer();
buffer_info.offset = 0;
buffer_info.range = 1024;
VkWriteDescriptorSet write_descriptor_set = {};
write_descriptor_set.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor_set.dstSet = ds.set_;
write_descriptor_set.descriptorCount = 1;
write_descriptor_set.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
write_descriptor_set.pBufferInfo = &buffer_info;
vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor_set, 0, nullptr);
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
pipe.CreateVKPipeline(pipeline_layout.handle(), m_renderPass);
VkEvent event;
VkEventCreateInfo event_create_info = {};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
m_commandBuffer->begin();
vkCmdSetEvent(m_commandBuffer->handle(), event, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
NULL);
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
m_errorMonitor->Reset(); // resume logmsg processing
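// With the submission above still in flight, destroying the event, semaphore, and fence below should
// each produce an in-use error.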
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyEvent-event-01145");
vkDestroyEvent(m_device->device(), event, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySemaphore-semaphore-01137");
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Fence 0x");
vkDestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If semaphore is not VK_NULL_HANDLE, semaphore must be a valid VkSemaphore handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Semaphore obj");
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->SetUnexpectedError("If fence is not VK_NULL_HANDLE, fence must be a valid VkFence handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Fence obj");
vkDestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->SetUnexpectedError("If event is not VK_NULL_HANDLE, event must be a valid VkEvent handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Event obj");
vkDestroyEvent(m_device->device(), event, nullptr);
}
TEST_F(VkLayerTest, QueryPoolInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use query pool.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_ci{};
query_pool_ci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_ci.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_ci.queryCount = 1;
vkCreateQueryPool(m_device->device(), &query_pool_ci, nullptr, &query_pool);
m_commandBuffer->begin();
// Reset query pool to create binding with cmd buffer
vkCmdResetQueryPool(m_commandBuffer->handle(), query_pool, 0, 1);
m_commandBuffer->end();
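// VK_QUERY_RESULT_PARTIAL_BIT is not allowed for timestamp query pools, so the vkGetQueryPoolResults
// call below should be flagged.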
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetQueryPoolResults-queryType-00818");
uint32_t data_space[16];
m_errorMonitor->SetUnexpectedError("Cannot get query results on queryPool");
vkGetQueryPoolResults(m_device->handle(), query_pool, 0, 1, sizeof(data_space), &data_space, sizeof(uint32_t),
VK_QUERY_RESULT_PARTIAL_BIT);
m_errorMonitor->VerifyFound();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy query pool while in-flight
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyQueryPool-queryPool-00793");
vkDestroyQueryPool(m_device->handle(), query_pool, NULL);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Now that the cmd buffer is done we can safely destroy query_pool
m_errorMonitor->SetUnexpectedError("If queryPool is not VK_NULL_HANDLE, queryPool must be a valid VkQueryPool handle");
m_errorMonitor->SetUnexpectedError("Unable to remove QueryPool obj");
vkDestroyQueryPool(m_device->handle(), query_pool, NULL);
}
TEST_F(VkLayerTest, PipelineInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use pipeline.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const VkPipelineLayoutObj pipeline_layout(m_device);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyPipeline-pipeline-00765");
// Create PSO to be used for draw-time errors below
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
// Store pipeline handle so we can actually delete it before test finishes
VkPipeline delete_this_pipeline;
{ // Scope pipeline so it will be auto-deleted
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
delete_this_pipeline = pipe.handle();
m_commandBuffer->begin();
// Bind pipeline to cmd buffer
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer; the pipeline is then destroyed while in-flight
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
} // Pipeline deletion triggered here
m_errorMonitor->VerifyFound();
// Make sure queue finished and then actually delete pipeline
vkQueueWaitIdle(m_device->m_queue);
m_errorMonitor->SetUnexpectedError("If pipeline is not VK_NULL_HANDLE, pipeline must be a valid VkPipeline handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Pipeline obj");
vkDestroyPipeline(m_device->handle(), delete_this_pipeline, nullptr);
}
TEST_F(VkLayerTest, CreateImageViewBreaksParameterCompatibilityRequirements) {
TEST_DESCRIPTION(
"Attempts to create an Image View with a view type that does not match the image type it is being created from.");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkPhysicalDeviceMemoryProperties memProps;
vkGetPhysicalDeviceMemoryProperties(m_device->phy().handle(), &memProps);
// Test mismatch detection for image of type VK_IMAGE_TYPE_1D
VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_1D,
VK_FORMAT_R8G8B8A8_UNORM,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image1D(m_device);
image1D.init(&imgInfo);
ASSERT_TRUE(image1D.initialized());
// Initialize VkImageViewCreateInfo with mismatched viewType
VkImageView imageView;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image1D.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_2D is not compatible with image");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Test mismatch detection for image of type VK_IMAGE_TYPE_2D
imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R8G8B8A8_UNORM,
{1, 1, 1},
1,
6,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image2D(m_device);
image2D.init(&imgInfo);
ASSERT_TRUE(image2D.initialized());
// Initialize VkImageViewCreateInfo with mismatched viewType
ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image2D.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_3D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_3D is not compatible with image");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Change VkImageViewCreateInfo to a different mismatched viewType
ivci.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
ivci.subresourceRange.layerCount = 6;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01003");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Test mismatch detection for image of type VK_IMAGE_TYPE_3D
imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_3D,
VK_FORMAT_R8G8B8A8_UNORM,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image3D(m_device);
image3D.init(&imgInfo);
ASSERT_TRUE(image3D.initialized());
// Initialize VkImageViewCreateInfo with mismatched viewType
ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image3D.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_1D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateImageView(): pCreateInfo->viewType VK_IMAGE_VIEW_TYPE_1D is not compatible with image");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Change VkImageViewCreateInfo to a different mismatched viewType
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
// Test for error message
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01005");
} else {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subResourceRange-01021");
}
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Check if the device can make the image required for this test case.
VkImageFormatProperties formProps = {{0, 0, 0}, 0, 0, 0, 0};
VkResult res = vkGetPhysicalDeviceImageFormatProperties(
m_device->phy().handle(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_3D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT,
&formProps);
// If not, skip this part of the test.
if (res || !m_device->phy().features().sparseBinding ||
!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
printf("%s %s is not supported.\n", kSkipPrefix, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
return;
}
// Initialize VkImageCreateInfo with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR and VK_IMAGE_CREATE_SPARSE_BINDING_BIT which
// are incompatible create flags.
imgInfo = {
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR | VK_IMAGE_CREATE_SPARSE_BINDING_BIT,
VK_IMAGE_TYPE_3D,
VK_FORMAT_R8G8B8A8_UNORM,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImage imageSparse;
// Creating a sparse image means we should not bind memory to it.
res = vkCreateImage(m_device->device(), &imgInfo, NULL, &imageSparse);
ASSERT_FALSE(res);
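// VK_SUCCESS is 0, so ASSERT_FALSE(res) asserts that the sparse image was created successfully.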
// Initialize VkImageViewCreateInfo to create a view that will attempt to utilize VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR.
ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = imageSparse;
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Clean up
vkDestroyImage(m_device->device(), imageSparse, nullptr);
}
TEST_F(VkLayerTest, CreateImageViewFormatFeatureMismatch) {
TEST_DESCRIPTION("Create view with a format that does not have the same features as the image format.");
if (!EnableDeviceProfileLayer()) {
printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;
// Load required functions
if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf("%s Failed to device profile layer.\n", kSkipPrefix);
return;
}
// List of features to be tested
VkFormatFeatureFlagBits features[] = {VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT,
VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT};
uint32_t feature_count = 4;
// List of usage cases for each feature test
VkImageUsageFlags usages[] = {VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT};
// List of errors that will be thrown in order of tests run
std::string optimal_error_codes[] = {
"VUID-VkImageViewCreateInfo-image-01013",
"VUID-VkImageViewCreateInfo-image-01014",
"VUID-VkImageViewCreateInfo-image-01015",
"VUID-VkImageViewCreateInfo-image-01016",
};
VkFormatProperties formatProps;
// First three tests
uint32_t i = 0;
for (i = 0; i < (feature_count - 1); i++) {
// Modify formats to have mismatched features
// Format for image
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps);
formatProps.optimalTilingFeatures |= features[i];
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps);
memset(&formatProps, 0, sizeof(formatProps));
// Format for view
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps);
formatProps.optimalTilingFeatures = features[(i + 1) % feature_count];
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps);
// Create image with modified format
VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R32G32B32A32_UINT,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
usages[i],
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image(m_device);
image.init(&imgInfo);
ASSERT_TRUE(image.initialized());
VkImageView imageView;
// Initialize VkImageViewCreateInfo with modified format
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R32G32B32A32_SINT;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]);
VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
if (!res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
}
// Test for VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT. Needs special formats
// Only run this test if format supported
if (!ImageFormatIsSupported(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, VK_IMAGE_TILING_OPTIMAL)) {
printf("%s VK_FORMAT_D24_UNORM_S8_UINT format not supported - skipped.\n", kSkipPrefix);
return;
}
// Modify formats to have mismatched features
// Format for image
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, &formatProps);
formatProps.optimalTilingFeatures |= features[i];
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D24_UNORM_S8_UINT, formatProps);
memset(&formatProps, 0, sizeof(formatProps));
// Format for view
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &formatProps);
formatProps.optimalTilingFeatures = features[(i + 1) % feature_count];
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, formatProps);
// Create image with modified format
VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_D24_UNORM_S8_UINT,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
usages[i],
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image(m_device);
image.init(&imgInfo);
ASSERT_TRUE(image.initialized());
VkImageView imageView;
// Initialize VkImageViewCreateInfo with modified format
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_D32_SFLOAT_S8_UINT;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
// Test for error message
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, optimal_error_codes[i]);
VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
if (!res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
}
TEST_F(VkLayerTest, InvalidImageViewUsageCreateInfo) {
TEST_DESCRIPTION("Usage modification via a chained VkImageViewUsageCreateInfo struct");
if (!EnableDeviceProfileLayer()) {
printf("%s Test requires DeviceProfileLayer, unavailable - skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE2_EXTENSION_NAME)) {
printf("%s Test requires API >= 1.1 or KHR_MAINTENANCE2 extension, unavailable - skipped.\n", kSkipPrefix);
return;
}
m_device_extension_names.push_back(VK_KHR_MAINTENANCE2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;
// Load required functions
if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf("%s Required extensions are not avaiable.\n", kSkipPrefix);
return;
}
VkFormatProperties formatProps;
// Ensure image format claims support for sampled and storage but excludes color attachment
memset(&formatProps, 0, sizeof(formatProps));
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, &formatProps);
formatProps.optimalTilingFeatures |= (VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT);
formatProps.optimalTilingFeatures = formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_UINT, formatProps);
// Create image with sampled and storage usages
VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R32G32B32A32_UINT,
{1, 1, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image(m_device);
image.init(&imgInfo);
ASSERT_TRUE(image.initialized());
// Force the imageview format to exclude the storage feature and include color attachment
memset(&formatProps, 0, sizeof(formatProps));
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, &formatProps);
formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
formatProps.optimalTilingFeatures = (formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT);
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R32G32B32A32_SINT, formatProps);
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R32G32B32A32_SINT;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// ImageView creation should fail because view format doesn't support all the underlying image's usages
VkImageView imageView;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01014");
VkResult res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
// Add a chained VkImageViewUsageCreateInfo to override original image usage bits, removing storage
VkImageViewUsageCreateInfo usage_ci = {VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO, nullptr, VK_IMAGE_USAGE_SAMPLED_BIT};
// Link the VkImageViewUsageCreateInfo struct into the view's create info pNext chain
ivci.pNext = &usage_ci;
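// With the view's usage restricted to SAMPLED, which the view format does support, the earlier
// format-feature mismatch no longer applies.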
// ImageView should now succeed without error
m_errorMonitor->ExpectSuccess();
res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound();
if (VK_SUCCESS == res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
// Try a zero usage field
usage_ci.usage = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-requiredbitmask");
res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
// Try a usage field with a bit not supported by underlying image
usage_ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-01587");
res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
// Try an illegal bit in usage field
usage_ci.usage = 0x10000000 | VK_IMAGE_USAGE_SAMPLED_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewUsageCreateInfo-usage-parameter");
res = vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
if (VK_SUCCESS == res) {
vkDestroyImageView(m_device->device(), imageView, nullptr);
}
}
TEST_F(VkLayerTest, ImageViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use imageView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo image_info{};
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
image_info.imageView = view;
image_info.sampler = sampler;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &image_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to use the sampler
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyImageView-imageView-01026");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer then destroy imageView
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy imageView while in-flight
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkDestroyImageView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Now we can actually destroy imageView
m_errorMonitor->SetUnexpectedError("If imageView is not VK_NULL_HANDLE, imageView must be a valid VkImageView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove ImageView obj");
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroySampler(m_device->device(), sampler, nullptr);
}
TEST_F(VkLayerTest, BufferViewInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use bufferView.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkBuffer buffer;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory buffer_memory;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
bool pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
VkBufferView view;
VkBufferViewCreateInfo bvci = {};
bvci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
bvci.buffer = buffer;
bvci.format = VK_FORMAT_R32_SFLOAT;
bvci.range = VK_WHOLE_SIZE;
err = vkCreateBufferView(m_device->device(), &bvci, NULL, &view);
ASSERT_VK_SUCCESS(err);
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor_write.pTexelBufferView = &view;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0, r32f) uniform readonly imageBuffer s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = imageLoad(s, 0);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroyBufferView-bufferView-00936");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// Bind pipeline to cmd buffer
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
nullptr);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy bufferView while in-flight
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkDestroyBufferView(m_device->device(), view, nullptr);
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Now we can actually destroy bufferView
m_errorMonitor->SetUnexpectedError("If bufferView is not VK_NULL_HANDLE, bufferView must be a valid VkBufferView handle");
m_errorMonitor->SetUnexpectedError("Unable to remove BufferView obj");
vkDestroyBufferView(m_device->device(), view, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), buffer_memory, NULL);
}
TEST_F(VkLayerTest, SamplerInUseDestroyedSignaled) {
TEST_DESCRIPTION("Delete in-use sampler.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err;
err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
err = vkCreateImageView(m_device->device(), &ivci, NULL, &view);
ASSERT_VK_SUCCESS(err);
VkDescriptorImageInfo image_info{};
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
image_info.imageView = view;
image_info.sampler = sampler;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &image_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
// Create PSO to use the sampler
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2D s;\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = texture(s, vec2(1));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkDestroySampler-sampler-01082");
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
// Bind pipeline to cmd buffer
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
nullptr);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
m_commandBuffer->Draw(1, 0, 0, 0);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
// Submit cmd buffer then destroy sampler
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_commandBuffer->handle();
// Submit cmd buffer and then destroy sampler while in-flight
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
vkDestroySampler(m_device->device(), sampler, nullptr); // Destroyed too soon
m_errorMonitor->VerifyFound();
vkQueueWaitIdle(m_device->m_queue);
// Now we can actually destroy sampler
m_errorMonitor->SetUnexpectedError("If sampler is not VK_NULL_HANDLE, sampler must be a valid VkSampler handle");
m_errorMonitor->SetUnexpectedError("Unable to remove Sampler obj");
vkDestroySampler(m_device->device(), sampler, NULL); // Destroyed for real
vkDestroyImageView(m_device->device(), view, NULL);
}
TEST_F(VkLayerTest, UpdateDestroyDescriptorSetLayout) {
TEST_DESCRIPTION("Attempt updates to descriptor sets with destroyed descriptor set layouts");
// TODO: Update to match the descriptor set layout specific VUIDs/VALIDATION_ERROR_* when present
const auto kWriteDestroyedLayout = "VUID-VkWriteDescriptorSet-dstSet-00320";
const auto kCopyDstDestroyedLayout = "VUID-VkCopyDescriptorSet-dstSet-parameter";
const auto kCopySrcDestroyedLayout = "VUID-VkCopyDescriptorSet-srcSet-parameter";
ASSERT_NO_FATAL_FAILURE(Init());
// Set up the descriptor (resource) and write/copy operations to use.
float data[16] = {};
VkConstantBufferObj buffer(m_device, sizeof(data), data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
ASSERT_TRUE(buffer.initialized());
VkDescriptorBufferInfo info = {};
info.buffer = buffer.handle();
info.range = VK_WHOLE_SIZE;
VkWriteDescriptorSet write_descriptor = {};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = VK_NULL_HANDLE; // must update this
write_descriptor.dstBinding = 0;
write_descriptor.descriptorCount = 1;
write_descriptor.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
write_descriptor.pBufferInfo = &info;
VkCopyDescriptorSet copy_descriptor = {};
copy_descriptor.sType = VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET;
copy_descriptor.srcSet = VK_NULL_HANDLE; // must update
copy_descriptor.srcBinding = 0;
copy_descriptor.dstSet = VK_NULL_HANDLE; // must update
copy_descriptor.dstBinding = 0;
copy_descriptor.descriptorCount = 1;
// Create valid and invalid source and destination descriptor sets
std::vector<VkDescriptorSetLayoutBinding> one_uniform_buffer = {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
};
OneOffDescriptorSet good_dst(m_device, one_uniform_buffer);
ASSERT_TRUE(good_dst.Initialized());
OneOffDescriptorSet bad_dst(m_device, one_uniform_buffer);
// Must assert before invalidating it below
ASSERT_TRUE(bad_dst.Initialized());
bad_dst.layout_ = VkDescriptorSetLayoutObj();
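// Replacing the layout member with a default-constructed object destroys the original layout, so
// bad_dst now references a destroyed descriptor set layout.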
OneOffDescriptorSet good_src(m_device, one_uniform_buffer);
ASSERT_TRUE(good_src.Initialized());
// Put valid data in the good and bad sources, simultaneously doing a positive test on write and copy operations
m_errorMonitor->ExpectSuccess();
write_descriptor.dstSet = good_src.set_;
vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL);
m_errorMonitor->VerifyNotFound();
OneOffDescriptorSet bad_src(m_device, one_uniform_buffer);
ASSERT_TRUE(bad_src.Initialized());
// To complete our positive testing, use copy where above we used write.
copy_descriptor.srcSet = good_src.set_;
copy_descriptor.dstSet = bad_src.set_;
vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
bad_src.layout_ = VkDescriptorSetLayoutObj();
m_errorMonitor->VerifyNotFound();
// Trigger the three invalid use errors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kWriteDestroyedLayout);
write_descriptor.dstSet = bad_dst.set_;
vkUpdateDescriptorSets(m_device->device(), 1, &write_descriptor, 0, NULL);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopyDstDestroyedLayout);
copy_descriptor.dstSet = bad_dst.set_;
vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, kCopySrcDestroyedLayout);
copy_descriptor.srcSet = bad_src.set_;
copy_descriptor.dstSet = good_dst.set_;
vkUpdateDescriptorSets(m_device->device(), 0, nullptr, 1, &copy_descriptor);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, QueueForwardProgressFenceWait) {
TEST_DESCRIPTION(
"Call VkQueueSubmit with a semaphore that is already signaled but not waited on by the queue. Wait on a fence that has not "
"yet been submitted to a queue.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *queue_forward_progress_message = " that has already been signaled but not waited on by queue 0x";
const char *invalid_fence_wait_message = " which has not been submitted on a Queue or during acquire next image.";
VkCommandBufferObj cb1(m_device, m_commandPool);
cb1.begin();
cb1.end();
VkSemaphoreCreateInfo semaphore_create_info = {};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphore semaphore;
ASSERT_VK_SUCCESS(vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore));
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cb1.handle();
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
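// The semaphore is now signaled but never waited on; signaling it again in the next submit should
// trigger the queue forward-progress error.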
m_commandBuffer->begin();
m_commandBuffer->end();
submit_info.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, queue_forward_progress_message);
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
VkFenceCreateInfo fence_create_info = {};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkFence fence;
ASSERT_VK_SUCCESS(vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence));
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, invalid_fence_wait_message);
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
m_errorMonitor->VerifyFound();
vkDeviceWaitIdle(m_device->device());
vkDestroyFence(m_device->device(), fence, nullptr);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
}
TEST_F(VkLayerTest, FramebufferIncompatible) {
TEST_DESCRIPTION(
"Bind a secondary command buffer with a framebuffer that does not match the framebuffer for the active renderpass.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// A renderpass with one color attachment.
VkAttachmentDescription attachment = {0,
VK_FORMAT_B8G8R8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible framebuffer.
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image.handle(),
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_B8G8R8A8_UNORM,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
VkCommandBufferAllocateInfo cbai = {};
cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cbai.commandPool = m_commandPool->handle();
cbai.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
cbai.commandBufferCount = 1;
VkCommandBuffer sec_cb;
err = vkAllocateCommandBuffers(m_device->device(), &cbai, &sec_cb);
ASSERT_VK_SUCCESS(err);
VkCommandBufferBeginInfo cbbi = {};
VkCommandBufferInheritanceInfo cbii = {};
cbii.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
cbii.renderPass = renderPass();
cbii.framebuffer = fb;
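// The secondary command buffer inherits the locally created framebuffer, but the primary begins the default render pass with the framework's framebuffer, so executing the secondary inside it is a mismatch.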
cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cbbi.pNext = NULL;
cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
cbbi.pInheritanceInfo = &cbii;
vkBeginCommandBuffer(sec_cb, &cbbi);
vkEndCommandBuffer(sec_cb);
VkCommandBufferBeginInfo cbbi2 = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr};
vkBeginCommandBuffer(m_commandBuffer->handle(), &cbbi2);
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" that is not the same as the primary command buffer's current active framebuffer ");
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &sec_cb);
m_errorMonitor->VerifyFound();
// Cleanup
vkCmdEndRenderPass(m_commandBuffer->handle());
vkEndCommandBuffer(m_commandBuffer->handle());
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroyRenderPass(m_device->device(), rp, NULL);
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
TEST_F(VkLayerTest, ColorBlendInvalidLogicOp) {
TEST_DESCRIPTION("Attempt to use invalid VkPipelineColorBlendStateCreateInfo::logicOp value.");
ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().logicOp) {
printf("%s Device does not support logicOp feature; skipped.\n", kSkipPrefix);
return;
}
const auto set_shading_enable = [](CreatePipelineHelper &helper) {
helper.cb_ci_.logicOpEnable = VK_TRUE;
helper.cb_ci_.logicOp = static_cast<VkLogicOp>(VK_LOGIC_OP_END_RANGE + 1); // invalid logicOp to be tested
};
CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607");
}
TEST_F(VkLayerTest, ColorBlendUnsupportedLogicOp) {
TEST_DESCRIPTION("Attempt enabling VkPipelineColorBlendStateCreateInfo::logicOpEnable when logicOp feature is disabled.");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; };
CreatePipelineHelper::OneshotTest(*this, set_shading_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606");
}
TEST_F(VkLayerTest, ColorBlendUnsupportedDualSourceBlend) {
TEST_DESCRIPTION("Attempt to use dual-source blending when dualSrcBlend feature is disabled.");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const auto set_dsb_src_color_enable = [](CreatePipelineHelper &helper) {
helper.cb_attachments_.blendEnable = VK_TRUE;
helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR; // bad!
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD;
helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD;
};
CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608");
const auto set_dsb_dst_color_enable = [](CreatePipelineHelper &helper) {
helper.cb_attachments_.blendEnable = VK_TRUE;
helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR; // bad
helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD;
helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD;
};
CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_color_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609");
const auto set_dsb_src_alpha_enable = [](CreatePipelineHelper &helper) {
helper.cb_attachments_.blendEnable = VK_TRUE;
helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD;
helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA; // bad
helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD;
};
CreatePipelineHelper::OneshotTest(*this, set_dsb_src_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610");
const auto set_dsb_dst_alpha_enable = [](CreatePipelineHelper &helper) {
helper.cb_attachments_.blendEnable = VK_TRUE;
helper.cb_attachments_.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
helper.cb_attachments_.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
helper.cb_attachments_.colorBlendOp = VK_BLEND_OP_ADD;
helper.cb_attachments_.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
helper.cb_attachments_.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA; // bad!
helper.cb_attachments_.alphaBlendOp = VK_BLEND_OP_ADD;
};
CreatePipelineHelper::OneshotTest(*this, set_dsb_dst_alpha_enable, VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611");
}
#if GTEST_IS_THREADSAFE
struct thread_data_struct {
VkCommandBuffer commandBuffer;
VkDevice device;
VkEvent event;
bool bailout;
};
extern "C" void *AddToCommandBuffer(void *arg) {
struct thread_data_struct *data = (struct thread_data_struct *)arg;
for (int i = 0; i < 80000; i++) {
vkCmdSetEvent(data->commandBuffer, data->event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
if (data->bailout) {
break;
}
}
return NULL;
}
TEST_F(VkLayerTest, ThreadCommandBufferCollision) {
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Calls AllocateCommandBuffers
VkCommandBufferObj commandBuffer(m_device, m_commandPool);
commandBuffer.begin();
VkEventCreateInfo event_info;
VkEvent event;
VkResult err;
memset(&event_info, 0, sizeof(event_info));
event_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
err = vkCreateEvent(device(), &event_info, NULL, &event);
ASSERT_VK_SUCCESS(err);
err = vkResetEvent(device(), event);
ASSERT_VK_SUCCESS(err);
struct thread_data_struct data;
data.commandBuffer = commandBuffer.handle();
data.event = event;
data.bailout = false;
m_errorMonitor->SetBailout(&data.bailout);
// First do some correct operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Make non-conflicting calls from this thread at the same time.
for (int i = 0; i < 80000; i++) {
uint32_t count;
vkEnumeratePhysicalDevices(instance(), &count, NULL);
}
test_platform_thread_join(thread, NULL);
// Then do some incorrect operations using multiple threads.
// Add many entries to command buffer from another thread.
test_platform_thread_create(&thread, AddToCommandBuffer, (void *)&data);
// Add many entries to command buffer from this thread at the same time.
AddToCommandBuffer(&data);
test_platform_thread_join(thread, NULL);
commandBuffer.end();
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyFound();
vkDestroyEvent(device(), event, NULL);
}
#endif // GTEST_IS_THREADSAFE
TEST_F(VkLayerTest, InvalidSPIRVCodeSize) {
TEST_DESCRIPTION("Test that errors are produced for a spirv modules with invalid code sizes");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V header");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkShaderModule module;
VkShaderModuleCreateInfo moduleCreateInfo;
struct icd_spv_header spv;
spv.magic = ICD_SPV_MAGIC;
spv.version = ICD_SPV_VERSION;
spv.gen_magic = 0;
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.pNext = NULL;
moduleCreateInfo.pCode = (const uint32_t *)&spv;
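// A codeSize of 4 bytes cannot hold a complete SPIR-V header, so creation should report an invalid header.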
moduleCreateInfo.codeSize = 4;
moduleCreateInfo.flags = 0;
vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);
m_errorMonitor->VerifyFound();
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) out float x;\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
" x = 0;\n"
"}\n";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01376");
std::vector<unsigned int> shader;
VkShaderModuleCreateInfo module_create_info;
VkShaderModule shader_module;
module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
module_create_info.pNext = NULL;
this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, shader);
module_create_info.pCode = shader.data();
// Introduce failure by making codeSize a non-multiple of 4
module_create_info.codeSize = shader.size() * sizeof(unsigned int) - 1;
module_create_info.flags = 0;
vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidSPIRVMagic) {
TEST_DESCRIPTION("Test that an error is produced for a spirv module with a bad magic number");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Invalid SPIR-V magic number");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkShaderModule module;
VkShaderModuleCreateInfo moduleCreateInfo;
struct icd_spv_header spv;
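// Invert the expected magic number so the module header is rejected.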
spv.magic = (uint32_t)~ICD_SPV_MAGIC;
spv.version = ICD_SPV_VERSION;
spv.gen_magic = 0;
moduleCreateInfo.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
moduleCreateInfo.pNext = NULL;
moduleCreateInfo.pCode = (const uint32_t *)&spv;
moduleCreateInfo.codeSize = sizeof(spv) + 16;
moduleCreateInfo.flags = 0;
vkCreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVertexOutputNotConsumed) {
TEST_DESCRIPTION("Test that a warning is produced for a vertex output that is not consumed by the fragment stage");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "not consumed by fragment shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) out float x;\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
" x = 0;\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineComplexTypes) {
TEST_DESCRIPTION("Smoke test for complex types across VS/FS boundary");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().tessellationShader) {
printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
char const *vsSource =
"#version 450\n"
"void main() {}";
char const *tcsSource =
"#version 450\n"
"layout(vertices=3) out;\n"
"struct S { int x; };\n"
"layout(location=2) patch out B { S s; } b;\n"
"void main() {\n"
" gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n"
" gl_TessLevelInner[0] = 1;\n"
" b.s.x = 1;\n"
"}\n";
char const *tesSource =
"#version 450\n"
"layout(triangles, equal_spacing, cw) in;\n"
"struct S { int x; };\n"
"layout(location=2) patch in B { S s; } b;\n"
"void main() { gl_Position = vec4(b.s.x); }\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 c;\n"
"void main() { c = vec4(1); }\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&tcs);
pipe.AddShader(&tes);
pipe.AddShader(&fs);
pipe.SetInputAssembly(&iasci);
pipe.SetTessellation(&tsci);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecialization) {
TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *bad_specialization_message =
"Specialization entry 0 (for constant id 0) references memory outside provided specialization data ";
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout (constant_id = 0) const float r = 0.0f;\n"
"layout(location = 0) out vec4 uFragColor;\n"
"void main(){\n"
" uFragColor = vec4(r,1,0,1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
const VkPipelineLayoutObj pipeline_layout(m_device);
VkPipelineViewportStateCreateInfo vp_state_create_info = {};
vp_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
vp_state_create_info.viewportCount = 1;
VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f};
vp_state_create_info.pViewports = &viewport;
vp_state_create_info.scissorCount = 1;
VkDynamicState scissor_state = VK_DYNAMIC_STATE_SCISSOR;
VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info = {};
pipeline_dynamic_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
pipeline_dynamic_state_create_info.dynamicStateCount = 1;
pipeline_dynamic_state_create_info.pDynamicStates = &scissor_state;
VkPipelineShaderStageCreateInfo shader_stage_create_info[2] = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
VkPipelineVertexInputStateCreateInfo vertex_input_create_info = {};
vertex_input_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
VkPipelineInputAssemblyStateCreateInfo input_assembly_create_info = {};
input_assembly_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_create_info.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
VkPipelineRasterizationStateCreateInfo rasterization_state_create_info = {};
rasterization_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state_create_info.pNext = nullptr;
rasterization_state_create_info.lineWidth = 1.0f;
rasterization_state_create_info.rasterizerDiscardEnable = true;
VkPipelineColorBlendAttachmentState color_blend_attachment_state = {};
color_blend_attachment_state.blendEnable = VK_FALSE;
color_blend_attachment_state.colorWriteMask = 0xf;
VkPipelineColorBlendStateCreateInfo color_blend_state_create_info = {};
color_blend_state_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
color_blend_state_create_info.attachmentCount = 1;
color_blend_state_create_info.pAttachments = &color_blend_attachment_state;
VkGraphicsPipelineCreateInfo graphicspipe_create_info = {};
graphicspipe_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
graphicspipe_create_info.stageCount = 2;
graphicspipe_create_info.pStages = shader_stage_create_info;
graphicspipe_create_info.pVertexInputState = &vertex_input_create_info;
graphicspipe_create_info.pInputAssemblyState = &input_assembly_create_info;
graphicspipe_create_info.pViewportState = &vp_state_create_info;
graphicspipe_create_info.pRasterizationState = &rasterization_state_create_info;
graphicspipe_create_info.pColorBlendState = &color_blend_state_create_info;
graphicspipe_create_info.pDynamicState = &pipeline_dynamic_state_create_info;
graphicspipe_create_info.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT;
graphicspipe_create_info.layout = pipeline_layout.handle();
graphicspipe_create_info.renderPass = renderPass();
VkPipelineCacheCreateInfo pipeline_cache_create_info = {};
pipeline_cache_create_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VkPipelineCache pipelineCache;
ASSERT_VK_SUCCESS(vkCreatePipelineCache(m_device->device(), &pipeline_cache_create_info, nullptr, &pipelineCache));
// This structure maps constant ids to data locations.
const VkSpecializationMapEntry entry =
// id, offset, size
{0, 4, sizeof(uint32_t)}; // Challenge core validation by using a bogus offset.
uint32_t data = 1;
// Set up the info describing spec map and data
const VkSpecializationInfo specialization_info = {
1,
&entry,
1 * sizeof(float),
&data,
};
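// The entry's offset (4) plus its size (4) extends past dataSize (4), so it references memory outside the provided specialization data.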
shader_stage_create_info[0].pSpecializationInfo = &specialization_info;
VkPipeline pipeline;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, bad_specialization_message);
vkCreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &graphicspipe_create_info, nullptr, &pipeline);
m_errorMonitor->VerifyFound();
vkDestroyPipelineCache(m_device->device(), pipelineCache, nullptr);
}
TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorTypeMismatch) {
TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *descriptor_type_mismatch_message = "Type mismatch on descriptor slot 0.0 (used as type ";
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
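// The layout declares a storage buffer at binding 0, but the vertex shader below consumes it as a uniform block.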
char const *vsSource =
"#version 450\n"
"\n"
"layout (std140, set = 0, binding = 0) uniform buf {\n"
" mat4 mvp;\n"
"} ubuf;\n"
"void main(){\n"
" gl_Position = ubuf.mvp * vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location = 0) out vec4 uFragColor;\n"
"void main(){\n"
" uFragColor = vec4(0,1,0,1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_type_mismatch_message);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorNotAccessible) {
TEST_DESCRIPTION(
"Create a pipeline in which a descriptor used by a shader stage does not include that stage in its stageFlags.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *descriptor_not_accessible_message = "Shader uses descriptor slot 0.0 (used as type ";
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT /*!*/, nullptr},
});
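// Binding 0 is visible only to the fragment stage, yet the vertex shader reads the uniform block at that binding.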
char const *vsSource =
"#version 450\n"
"\n"
"layout (std140, set = 0, binding = 0) uniform buf {\n"
" mat4 mvp;\n"
"} ubuf;\n"
"void main(){\n"
" gl_Position = ubuf.mvp * vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location = 0) out vec4 uFragColor;\n"
"void main(){\n"
" uFragColor = vec4(0,1,0,1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, descriptor_not_accessible_message);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineCheckShaderPushConstantNotAccessible) {
TEST_DESCRIPTION(
"Create a graphics pipeline in which a push constant range containing a push constant block member is not accessible from "
"the current shader stage.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *push_constant_not_accessible_message =
"Push constant range covering variable starting at offset 0 not accessible from stage VK_SHADER_STAGE_VERTEX_BIT";
char const *vsSource =
"#version 450\n"
"\n"
"layout(push_constant, std430) uniform foo { float x; } consts;\n"
"void main(){\n"
" gl_Position = vec4(consts.x);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location = 0) out vec4 uFragColor;\n"
"void main(){\n"
" uFragColor = vec4(0,1,0,1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
// Set up a push constant range
VkPushConstantRange push_constant_range = {};
// Set to the wrong stage to challenge core_validation
push_constant_range.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
push_constant_range.size = 4;
const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range});
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, push_constant_not_accessible_message);
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineCheckShaderNotEnabled) {
TEST_DESCRIPTION(
"Create a graphics pipeline in which a capability declared by the shader requires a feature not enabled on the device.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
const char *feature_not_enabled_message =
"Shader requires VkPhysicalDeviceFeatures::shaderFloat64 but is not enabled on the device";
// Some awkward steps are required to test with custom device features.
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Disable support for 64 bit floats
features.shaderFloat64 = false;
// The sacrificial device object
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" dvec4 green = vec4(0.0, 1.0, 0.0, 1.0);\n"
" color = vec4(green);\n"
"}\n";
VkShaderObj vs(&test_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(&test_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkRenderpassObj render_pass(&test_device);
VkPipelineObj pipe(&test_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
const VkPipelineLayoutObj pipeline_layout(&test_device);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, feature_not_enabled_message);
pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateShaderModuleCheckBadCapability) {
TEST_DESCRIPTION("Create a shader in which a capability declared by the shader is not supported.");
// Note that this failure message comes from spirv-tools, specifically the validator.
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"layout(xfb_buffer = 1) out;\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Capability TransformFeedback is not allowed by Vulkan");
std::vector<unsigned int> spv;
VkShaderModuleCreateInfo module_create_info;
VkShaderModule shader_module;
module_create_info.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
module_create_info.pNext = NULL;
this->GLSLtoSPV(VK_SHADER_STAGE_VERTEX_BIT, vsSource, spv);
module_create_info.pCode = spv.data();
module_create_info.codeSize = spv.size() * sizeof(unsigned int);
module_create_info.flags = 0;
vkCreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension1of2) {
// This is a positive test, no errors expected
// Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID
TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 1 of 2");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME)) {
printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix,
VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
// These tests require that the device support multiViewport
if (!m_device->phy().features().multiViewport) {
printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Vertex shader using viewport array capability
char const *vsSource =
"#version 450\n"
"#extension GL_ARB_shader_viewport_layer_array : enable\n"
"void main() {\n"
" gl_ViewportIndex = 1;\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
const VkPipelineLayoutObj pipe_layout(m_device, {});
m_errorMonitor->ExpectSuccess();
pipe.CreateVKPipeline(pipe_layout.handle(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineCheckShaderCapabilityExtension2of2) {
// This is a positive test, no errors expected
// Verifies the ability to deal with a shader that declares a non-unique SPIRV capability ID
TEST_DESCRIPTION("Create a shader in which uses a non-unique capability ID extension, 2 of 2");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME)) {
printf("%s Extension %s not supported, skipping this pass. \n", kSkipPrefix, VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME);
return;
}
m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitState());
// These tests require that the device support multiViewport
if (!m_device->phy().features().multiViewport) {
printf("%s Device does not support multiViewport, test skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Vertex shader using viewport array capability
char const *vsSource =
"#version 450\n"
"#extension GL_ARB_shader_viewport_layer_array : enable\n"
"void main() {\n"
" gl_ViewportIndex = 1;\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
const VkPipelineLayoutObj pipe_layout(m_device, {});
m_errorMonitor->ExpectSuccess();
pipe.CreateVKPipeline(pipe_layout.handle(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvided) {
TEST_DESCRIPTION(
"Test that an error is produced for a fragment shader input which is not present in the outputs of the previous stage");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) in float x;\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvidedInBlock) {
TEST_DESCRIPTION(
"Test that an error is produced for a fragment shader input within an interace block, which is not present in the outputs "
"of the previous stage.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not written by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"in block { layout(location=0) float x; } ins;\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(ins.x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchArraySize) {
TEST_DESCRIPTION("Test that an error is produced for mismatched array sizes across the vertex->fragment shader interface");
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Type mismatch on location 0.0: 'ptr to output arr[2] of float32' vs 'ptr to input arr[1] of float32'");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) out float x[2];\n"
"void main(){\n"
" x[0] = 0; x[1] = 0;\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) in float x[1];\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(x[0]);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatch) {
TEST_DESCRIPTION("Test that an error is produced for mismatched types across the vertex->fragment shader interface");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) out int x;\n"
"void main(){\n"
" x = 0;\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) in float x;\n" /* VS writes int */
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchInBlock) {
TEST_DESCRIPTION(
"Test that an error is produced for mismatched types across the vertex->fragment shader interface, when the variable is "
"contained within an interface block");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Type mismatch on location 0");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"out block { layout(location=0) int x; } outs;\n"
"void main(){\n"
" outs.x = 0;\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"in block { layout(location=0) float x; } ins;\n" /* VS writes int */
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(ins.x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByLocation) {
TEST_DESCRIPTION(
"Test that an error is produced for location mismatches across the vertex->fragment shader interface; This should manifest "
"as a not-written/not-consumed pair, but flushes out broken walking of the interfaces");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.0 which is not written by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"out block { layout(location=1) float x; } outs;\n"
"void main(){\n"
" outs.x = 0;\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"in block { layout(location=0) float x; } ins;\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(ins.x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByComponent) {
TEST_DESCRIPTION(
"Test that an error is produced for component mismatches across the vertex->fragment shader interface. It's not enough to "
"have the same set of locations in use; matching is defined in terms of spirv variables.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0.1 which is not written by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"out block { layout(location=0, component=0) float x; } outs;\n"
"void main(){\n"
" outs.x = 0;\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"in block { layout(location=0, component=1) float x; } ins;\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(ins.x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecision) {
TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"layout(location=0) out mediump float x;\n"
"void main() { gl_Position = vec4(0); x = 1.0; }\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) in highp float x;\n"
"layout(location=0) out vec4 color;\n"
"void main() { color = vec4(x); }\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecisionBlock) {
TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"out block { layout(location=0) mediump float x; };\n"
"void main() { gl_Position = vec4(0); x = 1.0; }\n";
char const *fsSource =
"#version 450\n"
"in block { layout(location=0) highp float x; };\n"
"layout(location=0) out vec4 color;\n"
"void main() { color = vec4(x); }\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "differ in precision");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineAttribNotConsumed) {
TEST_DESCRIPTION("Test that a warning is produced for a vertex attribute which is not consumed by the vertex shader");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attrib;
memset(&input_attrib, 0, sizeof(input_attrib));
input_attrib.format = VK_FORMAT_R32_SFLOAT;
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(&input_attrib, 1);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineAttribLocationMismatch) {
TEST_DESCRIPTION(
"Test that a warning is produced for a location mismatch on vertex attributes. This flushes out bad behavior in the "
"interface walker");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, "location 0 not consumed by vertex shader");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attrib;
memset(&input_attrib, 0, sizeof(input_attrib));
input_attrib.format = VK_FORMAT_R32_SFLOAT;
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=1) in float x;\n"
"void main(){\n"
" gl_Position = vec4(x);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(&input_attrib, 1);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
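// The attribute is at location 0 but the shader reads location 1; suppress the resulting "not provided" error and check only the "not consumed" warning.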
m_errorMonitor->SetUnexpectedError("Vertex shader consumes input at location 1 but not provided");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineAttribNotProvided) {
TEST_DESCRIPTION("Test that an error is produced for a vertex shader input which is not provided by a vertex attribute");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Vertex shader consumes input at location 0 but not provided");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in vec4 x;\n" /* not provided */
"void main(){\n"
" gl_Position = x;\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineAttribTypeMismatch) {
TEST_DESCRIPTION(
"Test that an error is produced for a mismatch between the fundamental type (float/int/uint) of an attribute and the "
"vertex shader input that consumes it");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "location 0 does not match vertex shader input type");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attrib;
memset(&input_attrib, 0, sizeof(input_attrib));
input_attrib.format = VK_FORMAT_R32_SFLOAT;
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in int x;\n" /* attrib provided float */
"void main(){\n"
" gl_Position = vec4(x);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(&input_attrib, 1);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineDuplicateStage) {
TEST_DESCRIPTION("Test that an error is produced for a pipeline containing multiple shaders for the same stage");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Multiple shaders provided for stage VK_SHADER_STAGE_VERTEX_BIT");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&vs); // intentionally duplicate vertex shader attachment
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineMissingEntrypoint) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "No entrypoint found named `foo`");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"void main(){\n"
" gl_Position = vec4(0);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this, "foo");
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineDepthStencilRequired) {
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"pDepthStencilState is NULL when rasterization is enabled and subpass uses a depth/stencil attachment");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"void main(){ gl_Position = vec4(0); }\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkAttachmentDescription attachments[] = {
{
0,
VK_FORMAT_B8G8R8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
},
{
0,
VK_FORMAT_D16_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
},
};
VkAttachmentReference refs[] = {
{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL},
};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &refs[0], nullptr, &refs[1], 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attachments, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
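// The subpass uses a depth/stencil attachment while the pipeline provides no pDepthStencilState, which should be flagged.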
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp);
m_errorMonitor->VerifyFound();
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, CreatePipelineTessPatchDecorationMismatch) {
TEST_DESCRIPTION(
"Test that an error is produced for a variable output from the TCS without the patch decoration, but consumed in the TES "
"with the decoration.");
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"is per-vertex in tessellation control shader stage but per-patch in tessellation evaluation shader stage");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().tessellationShader) {
printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
return;
}
char const *vsSource =
"#version 450\n"
"void main(){}\n";
char const *tcsSource =
"#version 450\n"
"layout(location=0) out int x[];\n"
"layout(vertices=3) out;\n"
"void main(){\n"
" gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n"
" gl_TessLevelInner[0] = 1;\n"
" x[gl_InvocationID] = gl_InvocationID;\n"
"}\n";
char const *tesSource =
"#version 450\n"
"layout(triangles, equal_spacing, cw) in;\n"
"layout(location=0) patch in int x;\n"
"void main(){\n"
" gl_Position.xyz = gl_TessCoord;\n"
" gl_Position.w = x;\n"
"}\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};
VkPipelineObj pipe(m_device);
pipe.SetInputAssembly(&iasci);
pipe.SetTessellation(&tsci);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&tcs);
pipe.AddShader(&tes);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineTessErrors) {
TEST_DESCRIPTION("Test various errors when creating a graphics pipeline with tessellation stages active.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().tessellationShader) {
printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
return;
}
char const *vsSource =
"#version 450\n"
"void main(){}\n";
char const *tcsSource =
"#version 450\n"
"layout(vertices=3) out;\n"
"void main(){\n"
" gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n"
" gl_TessLevelInner[0] = 1;\n"
"}\n";
char const *tesSource =
"#version 450\n"
"layout(triangles, equal_spacing, cw) in;\n"
"void main(){\n"
" gl_Position.xyz = gl_TessCoord;\n"
" gl_Position.w = 0;\n"
"}\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
{
VkPipelineObj pipe(m_device);
VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci;
iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology
pipe.SetInputAssembly(&iasci_bad);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
// Pass a tess control shader without a tess eval shader
pipe.AddShader(&tcs);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
{
VkPipelineObj pipe(m_device);
VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci;
iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; // otherwise we get a failure about invalid topology
pipe.SetInputAssembly(&iasci_bad);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
// Pass a tess eval shader without a tess control shader
pipe.AddShader(&tes);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00730");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
{
VkPipelineObj pipe(m_device);
pipe.SetInputAssembly(&iasci);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
// Pass patch topology without tessellation shaders
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-topology-00737");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
pipe.AddShader(&tcs);
pipe.AddShader(&tes);
// Pass a NULL pTessellationState (with active tessellation shader stages)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
// Pass an invalid pTessellationState (bad sType)
VkPipelineTessellationStateCreateInfo tsci_bad = tsci;
tsci_bad.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
pipe.SetTessellation(&tsci_bad);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineTessellationStateCreateInfo-sType-sType");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
// Pass out-of-range patchControlPoints
tsci_bad = tsci;
tsci_bad.patchControlPoints = 0;
pipe.SetTessellation(&tsci_bad);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
tsci_bad.patchControlPoints = m_device->props.limits.maxTessellationPatchSize + 1;
pipe.SetTessellation(&tsci_bad);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
pipe.SetTessellation(&tsci);
// Pass an invalid primitive topology
VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci;
iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
pipe.SetInputAssembly(&iasci_bad);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736");
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
pipe.SetInputAssembly(&iasci);
}
}
TEST_F(VkLayerTest, CreatePipelineAttribBindingConflict) {
TEST_DESCRIPTION(
"Test that an error is produced for a vertex attribute setup where multiple bindings provide the same location");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Duplicate vertex input binding descriptions for binding 0");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
/* Two binding descriptions for binding 0 */
VkVertexInputBindingDescription input_bindings[2];
memset(input_bindings, 0, sizeof(input_bindings));
VkVertexInputAttributeDescription input_attrib;
memset(&input_attrib, 0, sizeof(input_attrib));
input_attrib.format = VK_FORMAT_R32_SFLOAT;
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in float x;\n" /* attrib provided float */
"void main(){\n"
" gl_Position = vec4(x);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(input_bindings, 2);
pipe.AddVertexInputAttribs(&input_attrib, 1);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotWritten) {
TEST_DESCRIPTION(
"Test that an error is produced for a fragment shader which does not provide an output for one of the pipeline's color "
"attachments");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Attachment 0 not written by fragment shader");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"void main(){\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0, not written */
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineFragmentOutputNotWrittenButMasked) {
TEST_DESCRIPTION(
"Test that no error is produced when the fragment shader fails to declare an output, but the corresponding attachment's "
"write mask is 0.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"void main(){\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0, not written, but also masked */
pipe.AddDefaultColorAttachment(0);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotConsumed) {
TEST_DESCRIPTION(
"Test that a warning is produced for a fragment shader which provides a spurious output with no matching attachment");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
"fragment shader writes to output location 1 with no matching attachment");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(location=1) out vec4 y;\n" /* no matching attachment for this */
"void main(){\n"
" x = vec4(1);\n"
" y = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0, not written */
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
/* FS writes CB 1, but we don't configure it */
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineFragmentOutputTypeMismatch) {
TEST_DESCRIPTION(
"Test that an error is produced for a mismatch between the fundamental type of an fragment shader output variable, and the "
"format of the corresponding attachment");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "does not match fragment shader output type");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out ivec4 x;\n" /* not UNORM */
"void main(){\n"
" x = ivec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0; type is UNORM by default */
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineUniformBlockNotProvided) {
TEST_DESCRIPTION(
"Test that an error is produced for a shader consuming a uniform block which has no corresponding binding in the pipeline "
"layout");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in pipeline layout");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo { int x; int y; } bar;\n"
"void main(){\n"
" x = vec4(bar.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0; type is UNORM by default */
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelinePushConstantsNotInLayout) {
TEST_DESCRIPTION(
"Test that an error is produced for a shader consuming push constants which are not provided in the pipeline layout");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "not declared in layout");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"layout(push_constant, std430) uniform foo { float x; } consts;\n"
"void main(){\n"
" gl_Position = vec4(consts.x);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"void main(){\n"
" x = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
/* set up CB 0; type is UNORM by default */
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
/* should have generated an error -- no push constant ranges provided! */
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissing) {
TEST_DESCRIPTION(
"Test that an error is produced for a shader consuming an input attachment which is not included in the subpass "
"description");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"consumes input attachment index 0 but not provided in subpass");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = subpassLoad(x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
const VkPipelineLayoutObj pl(m_device, {&dsl});
// error here.
pipe.CreateVKPipeline(pl.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreatePipelineInputAttachmentTypeMismatch) {
TEST_DESCRIPTION(
"Test that an error is produced for a shader consuming an input attachment with a format having a different fundamental "
"type");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"input attachment 0 format of VK_FORMAT_R8G8B8A8_UINT does not match");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = subpassLoad(x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
const VkPipelineLayoutObj pl(m_device, {&dsl});
VkAttachmentDescription descs[2] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{0, VK_FORMAT_R8G8B8A8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
};
VkAttachmentReference color = {
0,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
VkAttachmentReference input = {
1,
VK_IMAGE_LAYOUT_GENERAL,
};
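// Attachment 1 (R8G8B8A8_UINT) backs the shader's float subpassInput, producing the fundamental type mismatch.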
VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// error here.
pipe.CreateVKPipeline(pl.handle(), rp);
m_errorMonitor->VerifyFound();
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissingArray) {
TEST_DESCRIPTION(
"Test that an error is produced for a shader consuming an input attachment which is not included in the subpass "
"description -- array case");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"consumes input attachment index 0 but not provided in subpass");
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput xs[1];\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = subpassLoad(xs[0]);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
const VkPipelineLayoutObj pl(m_device, {&dsl});
// error here.
pipe.CreateVKPipeline(pl.handle(), renderPass());
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateComputePipelineMissingDescriptor) {
TEST_DESCRIPTION(
"Test that an error is produced for a compute pipeline consuming a descriptor which is not provided in the pipeline "
"layout");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "Shader uses descriptor slot 0.0");
ASSERT_NO_FATAL_FAILURE(Init());
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) buffer block { vec4 x; };\n"
"void main(){\n"
" x = vec4(1);\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
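// Build the compute pipeline create info inline; basePipelineHandle = VK_NULL_HANDLE and basePipelineIndex = -1 mean
// the pipeline is not derived from a base pipeline.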
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
descriptorSet.GetPipelineLayout(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkLayerTest, CreateComputePipelineDescriptorTypeMismatch) {
TEST_DESCRIPTION("Test that an error is produced for a pipeline consuming a descriptor-backed resource of a mismatched type");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"but descriptor of type VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr};
const VkDescriptorSetLayoutObj dsl(m_device, {binding});
const VkPipelineLayoutObj pl(m_device, {&dsl});
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) buffer block { vec4 x; };\n"
"void main() {\n"
" x.x = 1.0f;\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
pl.handle(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkLayerTest, DrawTimeImageViewTypeMismatchWithPipeline) {
TEST_DESCRIPTION(
"Test that an error is produced when an image view type does not match the dimensionality declared in the shader");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires an image view of type VK_IMAGE_VIEW_TYPE_3D");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"void main() { gl_Position = vec4(0); }\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler3D s;\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = texture(s, vec3(0));\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
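// The framework's default VkTextureObj is a 2D texture, but the shader samples it through sampler3D, so the
// view-type mismatch is only detectable at draw time.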
VkTextureObj texture(m_device, nullptr);
VkSamplerObj sampler(m_device);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendSamplerTexture(&sampler, &texture);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
m_commandBuffer->BindDescriptorSet(descriptorSet);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// error produced here.
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, DrawTimeImageMultisampleMismatchWithPipeline) {
TEST_DESCRIPTION(
"Test that an error is produced when a multisampled images are consumed via singlesample images types in the shader, or "
"vice versa.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "requires bound image to have multiple samples");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"\n"
"void main() { gl_Position = vec4(0); }\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(set=0, binding=0) uniform sampler2DMS s;\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = texelFetch(s, ivec2(0), 0);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
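// The default VkTextureObj is single-sampled, while the shader's sampler2DMS expects a multisampled image, so the
// mismatch is reported at draw time.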
VkTextureObj texture(m_device, nullptr); // THIS LINE CAUSES CRASH ON MALI
VkSamplerObj sampler(m_device);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendSamplerTexture(&sampler, &texture);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
m_commandBuffer->BindDescriptorSet(descriptorSet);
VkViewport viewport = {0, 0, 16, 16, 0, 1};
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
VkRect2D scissor = {{0, 0}, {16, 16}};
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);
// error produced here.
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, AttachmentDescriptionUndefinedFormat) {
TEST_DESCRIPTION("Create a render pass with an attachment description format set to VK_FORMAT_UNDEFINED");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT, "format is VK_FORMAT_UNDEFINED");
VkAttachmentReference color_attach = {};
color_attach.layout = VK_IMAGE_LAYOUT_GENERAL;
color_attach.attachment = 0;
VkSubpassDescription subpass = {};
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &color_attach;
VkRenderPassCreateInfo rpci = {};
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
rpci.attachmentCount = 1;
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_UNDEFINED;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;
rpci.pAttachments = &attach_desc;
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
VkRenderPass rp;
VkResult result = vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
if (result == VK_SUCCESS) {
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
}
TEST_F(VkLayerTest, AttachmentDescriptionInvalidFinalLayout) {
TEST_DESCRIPTION("VkAttachmentDescription's finalLayout must not be UNDEFINED or PREINITIALIZED");
ASSERT_NO_FATAL_FAILURE(Init());
VkAttachmentDescription attach_desc = {};
attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkAttachmentReference attach_ref = {};
attach_ref.attachment = 0;
attach_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &attach_ref;
VkRenderPassCreateInfo rpci = {};
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rpci.attachmentCount = 1;
rpci.pAttachments = &attach_desc;
rpci.subpassCount = 1;
rpci.pSubpasses = &subpass;
VkRenderPass rp = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-finalLayout-00843");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
if (rp != VK_NULL_HANDLE) {
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
attach_desc.finalLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkAttachmentDescription-finalLayout-00843");
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyFound();
if (rp != VK_NULL_HANDLE) {
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
}
TEST_F(VkLayerTest, CreateImageViewNoMemoryBoundToImage) {
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
" used with no memory bound. Memory should be bound by calling vkBindImageMemory().");
ASSERT_NO_FATAL_FAILURE(Init());
// Create an image and try to create a view with no memory backing the image
VkImage image;
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image;
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
VkImageView view;
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image, NULL);
// If last error is success, it still created the view, so delete it.
if (err == VK_SUCCESS) {
vkDestroyImageView(m_device->device(), view, NULL);
}
}
TEST_F(VkLayerTest, InvalidImageViewAspect) {
TEST_DESCRIPTION("Create an image and try to create a view with an invalid aspectMask");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
ASSERT_NO_FATAL_FAILURE(Init());
const VkFormat tex_format = VK_FORMAT_B8G8R8A8_UNORM;
VkImageObj image(m_device);
image.Init(32, 32, 1, tex_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_LINEAR, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image.handle();
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.layerCount = 1;
// Cause an error by setting an invalid image aspect
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
VkImageView view;
vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ExerciseGetImageSubresourceLayout) {
TEST_DESCRIPTION("Test vkGetImageSubresourceLayout() valid usages");
ASSERT_NO_FATAL_FAILURE(Init());
VkSubresourceLayout subres_layout = {};
// VU 00732: image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
{
const VkImageTiling tiling = VK_IMAGE_TILING_OPTIMAL; // ERROR: violates VU 00732
VkImageObj img(m_device);
img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, tiling);
ASSERT_TRUE(img.initialized());
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subres.mipLevel = 0;
subres.arrayLayer = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-image-00996");
vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
m_errorMonitor->VerifyFound();
}
// VU 00733: The aspectMask member of pSubresource must only have a single bit set
{
VkImageObj img(m_device);
img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
ASSERT_TRUE(img.initialized());
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_METADATA_BIT; // ERROR: triggers VU 00733
subres.mipLevel = 0;
subres.arrayLayer = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-aspectMask-00997");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresource-aspectMask-parameter");
vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
m_errorMonitor->VerifyFound();
}
// 00739 mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
{
VkImageObj img(m_device);
img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
ASSERT_TRUE(img.initialized());
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subres.mipLevel = 1; // ERROR: triggers VU 00739
subres.arrayLayer = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-mipLevel-01716");
vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
m_errorMonitor->VerifyFound();
}
// 00740 arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
{
VkImageObj img(m_device);
img.InitNoLayout(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
ASSERT_TRUE(img.initialized());
VkImageSubresource subres = {};
subres.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subres.mipLevel = 0;
subres.arrayLayer = 1; // ERROR: triggers VU 00740
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717");
vkGetImageSubresourceLayout(m_device->device(), img.image(), &subres, &subres_layout);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, CopyImageLayerCountMismatch) {
TEST_DESCRIPTION(
"Try to copy between images with the source subresource having a different layerCount than the destination subresource");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images to copy between
VkImageObj src_image_obj(m_device);
VkImageObj dst_image_obj(m_device);
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 4;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.flags = 0;
src_image_obj.init(&image_create_info);
ASSERT_TRUE(src_image_obj.initialized());
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
dst_image_obj.init(&image_create_info);
ASSERT_TRUE(dst_image_obj.initialized());
m_commandBuffer->begin();
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset.x = 0;
copyRegion.srcOffset.y = 0;
copyRegion.srcOffset.z = 0;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
// Introduce failure by forcing the dst layerCount to differ from src
copyRegion.dstSubresource.layerCount = 3;
copyRegion.dstOffset.x = 0;
copyRegion.dstOffset.y = 0;
copyRegion.dstOffset.z = 0;
copyRegion.extent.width = 1;
copyRegion.extent.height = 1;
copyRegion.extent.depth = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-extent-00140");
m_commandBuffer->CopyImage(src_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image_obj.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ImageLayerUnsupportedFormat) {
TEST_DESCRIPTION("Creating images with unsupported formats ");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Create image with unsupported format - Expect FORMAT_UNSUPPORTED
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_UNDEFINED;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateImage: VkFormat for image must not be VK_FORMAT_UNDEFINED");
VkImage image;
vkCreateImage(m_device->handle(), &image_create_info, NULL, &image);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateImageViewFormatMismatchUnrelated) {
TEST_DESCRIPTION("Create an image with a color format, then try to create a depth view of it");
if (!EnableDeviceProfileLayer()) {
printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
// Load required functions
PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT =
(PFN_vkSetPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(), "vkSetPhysicalDeviceFormatPropertiesEXT");
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT =
(PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT)vkGetInstanceProcAddr(instance(),
"vkGetOriginalPhysicalDeviceFormatPropertiesEXT");
if (!(fpvkSetPhysicalDeviceFormatPropertiesEXT) || !(fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
return;
}
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s Couldn't find depth stencil image format.\n", kSkipPrefix);
return;
}
VkFormatProperties formatProps;
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, &formatProps);
formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), depth_format, formatProps);
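// Advertise color-attachment support for the depth format through the device_profile_api layer so the subsequent
// view creation fails only on the color/depth format mismatch rather than on missing format features.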
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView imgView;
VkImageViewCreateInfo imgViewInfo = {};
imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imgViewInfo.image = image.handle();
imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imgViewInfo.format = depth_format;
imgViewInfo.subresourceRange.layerCount = 1;
imgViewInfo.subresourceRange.baseMipLevel = 0;
imgViewInfo.subresourceRange.levelCount = 1;
imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Can't use depth format for view into color image - Expect INVALID_FORMAT
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.");
vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateImageViewNoMutableFormatBit) {
TEST_DESCRIPTION("Create an image view with a different format, when the image does not have MUTABLE_FORMAT bit");
if (!EnableDeviceProfileLayer()) {
printf("%s Couldn't enable device profile layer.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;
// Load required functions
if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
printf("%s Required extensions are not present.\n", kSkipPrefix);
return;
}
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkFormatProperties formatProps;
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, &formatProps);
formatProps.optimalTilingFeatures |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_B8G8R8A8_UINT, formatProps);
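// Report color-attachment support for B8G8R8A8_UINT so the only expected failure is the missing MUTABLE_FORMAT bit.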
VkImageView imgView;
VkImageViewCreateInfo imgViewInfo = {};
imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imgViewInfo.image = image.handle();
imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imgViewInfo.format = VK_FORMAT_B8G8R8A8_UINT;
imgViewInfo.subresourceRange.layerCount = 1;
imgViewInfo.subresourceRange.baseMipLevel = 0;
imgViewInfo.subresourceRange.levelCount = 1;
imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Same compatibility class but no MUTABLE_FORMAT bit - Expect
// VIEW_CREATE_ERROR
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01019");
vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateImageViewDifferentClass) {
TEST_DESCRIPTION("Passing bad parameters to CreateImageView");
ASSERT_NO_FATAL_FAILURE(Init());
if (!(m_device->format_properties(VK_FORMAT_R8_UINT).optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
printf("%s Device does not support R8_UINT as color attachment; skipped", kSkipPrefix);
return;
}
VkImageCreateInfo mutImgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R8_UINT,
{128, 128, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj mutImage(m_device);
mutImage.init(&mutImgInfo);
ASSERT_TRUE(mutImage.initialized());
VkImageView imgView;
VkImageViewCreateInfo imgViewInfo = {};
imgViewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
imgViewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
imgViewInfo.format = VK_FORMAT_B8G8R8A8_UNORM;
imgViewInfo.subresourceRange.layerCount = 1;
imgViewInfo.subresourceRange.baseMipLevel = 0;
imgViewInfo.subresourceRange.levelCount = 1;
imgViewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imgViewInfo.image = mutImage.handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01018");
vkCreateImageView(m_device->handle(), &imgViewInfo, NULL, &imgView);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, MultiplaneIncompatibleViewFormat) {
TEST_DESCRIPTION("Postive/negative tests of multiplane imageview format compatibility");
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify format
VkFormatFeatureFlags features = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
if (!supported) {
printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
return;
}
VkImageObj image_obj(m_device);
image_obj.init(&ci);
ASSERT_TRUE(image_obj.initialized());
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image_obj.image();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8_SNORM; // Compat is VK_FORMAT_R8_UNORM
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT;
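// Each 8-bit plane of G8_B8_R8_3PLANE_420 must be viewed with its compatible single-plane format (VK_FORMAT_R8_UNORM),
// as noted above; R8_SNORM is deliberately incompatible.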
// Incompatible format error
VkImageView imageView = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-image-01586");
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyFound();
vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed
imageView = VK_NULL_HANDLE;
// Correct format succeeds
ivci.format = VK_FORMAT_R8_UNORM;
m_errorMonitor->ExpectSuccess();
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound();
vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed
imageView = VK_NULL_HANDLE;
// Try a multiplane imageview
ivci.format = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->ExpectSuccess();
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound();
vkDestroyImageView(m_device->device(), imageView, NULL); // VK_NULL_HANDLE allowed
}
TEST_F(VkLayerTest, CreateImageViewInvalidSubresourceRange) {
TEST_DESCRIPTION("Passing bad image subrange to CreateImageView");
ASSERT_NO_FATAL_FAILURE(Init());
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
VkImageView img_view;
VkImageViewCreateInfo img_view_info_template = {};
img_view_info_template.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
img_view_info_template.image = image.handle();
img_view_info_template.viewType = VK_IMAGE_VIEW_TYPE_2D_ARRAY;
img_view_info_template.format = image.format();
// subresourceRange to be filled later for the purposes of this test
img_view_info_template.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_view_info_template.subresourceRange.baseMipLevel = 0;
img_view_info_template.subresourceRange.levelCount = 0;
img_view_info_template.subresourceRange.baseArrayLayer = 0;
img_view_info_template.subresourceRange.layerCount = 0;
// Try baseMipLevel >= image.mipLevels with VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, VK_REMAINING_MIP_LEVELS, 0, 1};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel >= image.mipLevels without VK_REMAINING_MIP_LEVELS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01478");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718");
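// Both the out-of-range baseMipLevel (01478) and the resulting baseMipLevel + levelCount overflow (01718) are expected here.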
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 1, 0, 1};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try levelCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 0, 1};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try baseMipLevel + levelCount > image.mipLevels
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageViewCreateInfo-subresourceRange-01718");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 1};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// These tests rely on the Maintenance1 extension not being enabled, and are only valid on Vulkan 1.0
if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
// Try baseArrayLayer >= image.arrayLayers with VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageViewCreateInfo-subresourceRange-01480");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, VK_REMAINING_ARRAY_LAYERS};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer >= image.arrayLayers without VK_REMAINING_ARRAY_LAYERS
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageViewCreateInfo-subresourceRange-01480");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageViewCreateInfo-subresourceRange-01719");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 1, 1};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try layerCount = 0
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCreateImageView: if pCreateInfo->viewType is VK_IMAGE_TYPE_2D_ARRAY, "
"pCreateInfo->subresourceRange.layerCount must be >= 1");
// TODO: The test environment aborts the Vulkan call in parameter_validation layer before
// "VUID-VkImageViewCreateInfo-subresourceRange-01719" test
// m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
// "VUID-VkImageViewCreateInfo-subresourceRange-01719");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 0};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
// Try baseArrayLayer + layerCount > image.arrayLayers
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageViewCreateInfo-subresourceRange-01719");
const VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 2};
VkImageViewCreateInfo img_view_info = img_view_info_template;
img_view_info.subresourceRange = range;
vkCreateImageView(m_device->handle(), &img_view_info, nullptr, &img_view);
m_errorMonitor->VerifyFound();
}
}
}
TEST_F(VkLayerTest, CompressedImageMipCopyTests) {
TEST_DESCRIPTION("Image/Buffer copies for higher mip levels");
ASSERT_NO_FATAL_FAILURE(Init());
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
VkFormat compressed_format = VK_FORMAT_UNDEFINED;
if (device_features.textureCompressionBC) {
compressed_format = VK_FORMAT_BC3_SRGB_BLOCK;
} else if (device_features.textureCompressionETC2) {
compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
} else if (device_features.textureCompressionASTC_LDR) {
compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
} else {
printf("%s No compressed formats supported - CompressedImageMipCopyTests skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = compressed_format;
ci.extent = {32, 32, 1};
ci.mipLevels = 6;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj image(m_device);
image.init(&ci);
ASSERT_TRUE(image.initialized());
VkImageObj odd_image(m_device);
ci.extent = {31, 32, 1}; // Mips are [31,32] [15,16] [7,8] [3,4] [1,2] [1,1]
odd_image.init(&ci);
ASSERT_TRUE(odd_image.initialized());
// Allocate buffers
VkMemoryPropertyFlags reqs = 0;
VkBufferObj buffer_1024, buffer_64, buffer_16, buffer_8;
buffer_1024.init_as_src_and_dst(*m_device, 1024, reqs);
buffer_64.init_as_src_and_dst(*m_device, 64, reqs);
buffer_16.init_as_src_and_dst(*m_device, 16, reqs);
buffer_8.init_as_src_and_dst(*m_device, 8, reqs);
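// For these compressed formats a 4x4 block is 16 bytes (1 byte per texel), so whole mips 0/2/3 need 1024/64/16 bytes;
// buffer_8 is deliberately smaller than a single block.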
VkBufferImageCopy region = {};
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageOffset = {0, 0, 0};
region.bufferOffset = 0;
// start recording
m_commandBuffer->begin();
// Mip level copies that work - 5 levels
m_errorMonitor->ExpectSuccess();
// Mip 0 should fit in 1k buffer - 1k texels @ 1b each
region.imageExtent = {32, 32, 1};
region.imageSubresource.mipLevel = 0;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_1024.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_1024.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
// Mip 2 should fit in 64b buffer - 64 texels @ 1b each
region.imageExtent = {8, 8, 1};
region.imageSubresource.mipLevel = 2;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
// Mip 3 should fit in 16b buffer - 16 texels @ 1b each
region.imageExtent = {4, 4, 1};
region.imageSubresource.mipLevel = 3;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
// Mip 4&5 should fit in 16b buffer with no complaint - 4 & 1 texels @ 1b each
region.imageExtent = {2, 2, 1};
region.imageSubresource.mipLevel = 4;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
region.imageExtent = {1, 1, 1};
region.imageSubresource.mipLevel = 5;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyNotFound();
// Buffer must accommodate a full compressed block, regardless of texel count
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_8.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00171");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_8.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// Copy width < compressed block size, but not the full mip width
region.imageExtent = {1, 2, 1};
region.imageSubresource.mipLevel = 4;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00207"); // width not a multiple of compressed block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00207"); // width not a multiple of compressed block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// Copy height < compressed block size but not the full mip height
region.imageExtent = {2, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00208"); // height not a multiple of compressed block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00208"); // height not a multiple of compressed block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// Offsets must be multiple of compressed block size
region.imageOffset = {1, 1, 0};
region.imageExtent = {1, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageOffset-00205"); // imageOffset not a multiple of block size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageOffset-00205"); // imageOffset not a multiple of block size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// Offset + extent width = mip width - should succeed
region.imageOffset = {4, 4, 0};
region.imageExtent = {3, 4, 1};
region.imageSubresource.mipLevel = 2;
m_errorMonitor->ExpectSuccess();
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyNotFound();
// Offset + extent width > mip width, but still within the final compressed block - should succeed
region.imageExtent = {4, 4, 1};
m_errorMonitor->ExpectSuccess();
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyNotFound();
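// Note: per VUIDs 00207/00208, a copy extent that is not a multiple of the compressed block size
// is legal only when imageOffset + imageExtent reaches the mip level's edge in that dimension,
// i.e. the partial block at the far edge of the image, as exercised by the two copies above.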
// Offset + extent height < mip height and not a multiple of block height - should fail
region.imageExtent = {3, 3, 1};
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00208"); // offset+extent not a multiple of block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00208"); // offset+extent not a multiple of block width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-imageOffset-01793"); // image transfer granularity
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16.handle(), odd_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ImageBufferCopyTests) {
TEST_DESCRIPTION("Image to buffer and buffer to image tests");
ASSERT_NO_FATAL_FAILURE(Init());
// Bail if any dimension of transfer granularity is 0.
auto index = m_device->graphics_queue_node_index_;
auto queue_family_properties = m_device->phy().queue_properties();
if ((queue_family_properties[index].minImageTransferGranularity.depth == 0) ||
(queue_family_properties[index].minImageTransferGranularity.width == 0) ||
(queue_family_properties[index].minImageTransferGranularity.height == 0)) {
printf("%s Subresource copies are disallowed when xfer granularity (x|y|z) is 0. Skipped.\n", kSkipPrefix);
return;
}
VkImageObj image_64k(m_device); // 128^2 texels, 64k
VkImageObj image_16k(m_device); // 64^2 texels, 16k
VkImageObj image_16k_depth(m_device); // 64^2 texels, depth, 16k
VkImageObj ds_image_4D_1S(m_device); // 256^2 texels, 512k (256k depth, 64k stencil, 192k packing)
VkImageObj ds_image_3D_1S(m_device); // 256^2 texels, 256k (192k depth, 64k stencil)
VkImageObj ds_image_2D(m_device); // 256^2 texels, 128k (128k depth)
VkImageObj ds_image_1S(m_device); // 256^2 texels, 64k (64k stencil)
image_64k.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
image_16k.Init(64, 64, 1, VK_FORMAT_R8G8B8A8_UINT,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image_64k.initialized());
ASSERT_TRUE(image_16k.initialized());
// Verify all needed Depth/Stencil formats are supported
bool missing_ds_support = false;
VkFormatProperties props = {0, 0, 0};
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT_S8_UINT, &props);
missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0);
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0;
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D24_UNORM_S8_UINT, &props);
missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0);
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0;
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D16_UNORM, &props);
missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0);
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0;
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_S8_UINT, &props);
missing_ds_support |= (props.bufferFeatures == 0 && props.linearTilingFeatures == 0 && props.optimalTilingFeatures == 0);
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT) == 0;
missing_ds_support |= (props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_DST_BIT) == 0;
if (!missing_ds_support) {
image_16k_depth.Init(64, 64, 1, VK_FORMAT_D24_UNORM_S8_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image_16k_depth.initialized());
ds_image_4D_1S.Init(
256, 256, 1, VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(ds_image_4D_1S.initialized());
ds_image_3D_1S.Init(
256, 256, 1, VK_FORMAT_D24_UNORM_S8_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(ds_image_3D_1S.initialized());
ds_image_2D.Init(
256, 256, 1, VK_FORMAT_D16_UNORM,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(ds_image_2D.initialized());
ds_image_1S.Init(
256, 256, 1, VK_FORMAT_S8_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(ds_image_1S.initialized());
}
// Allocate buffers
VkBufferObj buffer_256k, buffer_128k, buffer_64k, buffer_16k;
VkMemoryPropertyFlags reqs = 0;
buffer_256k.init_as_src_and_dst(*m_device, 262144, reqs); // 256k
buffer_128k.init_as_src_and_dst(*m_device, 131072, reqs); // 128k
buffer_64k.init_as_src_and_dst(*m_device, 65536, reqs); // 64k
buffer_16k.init_as_src_and_dst(*m_device, 16384, reqs); // 16k
VkBufferImageCopy region = {};
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageOffset = {0, 0, 0};
region.imageExtent = {64, 64, 1};
region.bufferOffset = 0;
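// For reference: with tight packing (bufferRowLength/bufferImageHeight of 0), a 64x64 region of
// 4-byte R8G8B8A8_UINT texels needs 64 * 64 * 4 = 16384 bytes, exactly the size of buffer_16k.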
// attempt copies before putting command buffer in recording state
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-commandBuffer-recording");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-commandBuffer-recording");
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
// start recording
m_commandBuffer->begin();
// successful copies
m_errorMonitor->ExpectSuccess();
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
region.imageOffset.x = 16; // 16k copy, offset requires larger image
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
region.imageExtent.height = 78; // > 16k copy requires larger buffer & image
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
region.imageOffset.x = 0;
region.imageExtent.height = 64;
region.bufferOffset = 256; // 16k copy with buffer offset, requires larger buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
m_errorMonitor->VerifyNotFound();
// image/buffer too small (extent too large) on copy to image
region.imageExtent = {65, 64, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// image/buffer too small (offset) on copy to image
region.imageExtent = {64, 64, 1};
region.imageOffset = {0, 4, 0};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-pRegions-00171"); // buffer too small
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-pRegions-00172"); // image too small
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_64k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
// image/buffer too small on copy to buffer
region.imageExtent = {64, 64, 1};
region.imageOffset = {0, 0, 0};
region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // buffer too small
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_64k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
region.imageExtent = {64, 65, 1};
region.bufferOffset = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // image too small
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
// buffer size OK but rowlength causes loose packing
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
region.imageExtent = {64, 64, 1};
region.bufferRowLength = 68;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
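// For reference: bufferRowLength = 68 makes the row pitch 68 * 4 = 272 bytes, so the last texel of
// the 64x64 copy ends at about 63 * 272 + 64 * 4 = 17392 bytes, past the end of the 16k buffer.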
// An extent with zero area should produce a warning, but no error
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT | VK_DEBUG_REPORT_ERROR_BIT_EXT, "} has zero area");
region.imageExtent.width = 0;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
// aspect bits
region.imageExtent = {64, 64, 1};
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
if (!missing_ds_support) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-aspectMask-00212"); // more than 1 aspect bit set
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
&region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-aspectMask-00211"); // different mis-matched aspect
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_depth.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1,
&region);
m_errorMonitor->VerifyFound();
}
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-aspectMask-00211"); // mis-matched aspect
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// Out-of-range mip levels should fail
region.imageSubresource.mipLevel = image_16k.create_info().mipLevels + 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703");
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00182"); // unavoidable "region exceeds image bounds" for non-existent mip
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01701");
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyBufferToImage-pRegions-00172"); // unavoidable "region exceeds image bounds" for non-existent mip
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
region.imageSubresource.mipLevel = 0;
// Out-of-range array layers should fail
region.imageSubresource.baseArrayLayer = image_16k.create_info().arrayLayers;
region.imageSubresource.layerCount = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704");
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(), 1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-imageSubresource-01702");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &region);
m_errorMonitor->VerifyFound();
region.imageSubresource.baseArrayLayer = 0;
// Layout mismatch should fail
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189");
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer_16k.handle(), image_16k.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region);
m_errorMonitor->VerifyFound();
// Test Depth/Stencil copies
if (missing_ds_support) {
printf("%s Depth / Stencil formats unsupported - skipping D/S tests.\n", kSkipPrefix);
} else {
VkBufferImageCopy ds_region = {};
ds_region.bufferOffset = 0;
ds_region.bufferRowLength = 0;
ds_region.bufferImageHeight = 0;
ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
ds_region.imageSubresource.mipLevel = 0;
ds_region.imageSubresource.baseArrayLayer = 0;
ds_region.imageSubresource.layerCount = 1;
ds_region.imageOffset = {0, 0, 0};
ds_region.imageExtent = {256, 256, 1};
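// Size notes for the 256x256 (65536-texel) copies below, assuming the per-aspect packing the
// layers compute: a depth-only copy needs 4 bytes/texel for D32 (256k), 4 bytes (loosely packed)
// for D24 (256k), and 2 bytes for D16 (128k); a stencil-only copy needs 1 byte/texel (64k).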
// Depth copies that should succeed
m_errorMonitor->ExpectSuccess(); // Extract 4b depth per texel, pack into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess(); // Extract 3b depth per texel, pack (loose) into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess(); // Copy 2b depth per texel, into 128k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_128k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
// Depth copies that should fail
ds_region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 4b depth per texel, pack into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 3b depth per texel, pack (loose) into 256k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_256k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 2b depth per texel, into 128k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_2D.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_128k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
// Stencil copies that should succeed
ds_region.bufferOffset = 0;
ds_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess(); // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess(); // Copy 1b stencil per texel into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyNotFound();
// Stencil copies that should fail
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_4D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_16k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Extract 1b stencil per texel, pack into 64k buffer
ds_region.bufferRowLength = 260;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_3D_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
ds_region.bufferRowLength = 0;
ds_region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-pRegions-00183"); // Copy 1b depth per texel, into 64k buffer
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), ds_image_1S.handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
buffer_64k.handle(), 1, &ds_region);
m_errorMonitor->VerifyFound();
}
// Test compressed formats, if supported
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
if (!(device_features.textureCompressionBC || device_features.textureCompressionETC2 ||
device_features.textureCompressionASTC_LDR)) {
printf("%s No compressed formats supported - block compression tests skipped.\n", kSkipPrefix);
} else {
VkImageObj image_16k_4x4comp(m_device); // 128^2 texels as 32^2 compressed (4x4) blocks, 16k
VkImageObj image_NPOT_4x4comp(m_device); // 130^2 texels as 33^2 compressed (4x4) blocks
if (device_features.textureCompressionBC) {
image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL,
0);
image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_BC3_SRGB_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL,
0);
} else if (device_features.textureCompressionETC2) {
image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
} else {
image_16k_4x4comp.Init(128, 128, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
image_NPOT_4x4comp.Init(130, 130, 1, VK_FORMAT_ASTC_4x4_UNORM_BLOCK, VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
}
ASSERT_TRUE(image_16k_4x4comp.initialized());
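// Block math for the copies below: BC3, ETC2_R8G8B8A8 and ASTC 4x4 all use 16-byte 4x4 texel
// blocks, so the 128x128 image is 32x32 blocks = 1024 blocks * 16 bytes = 16k, exactly buffer_16k.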
// Just fits
m_errorMonitor->ExpectSuccess();
region.imageExtent = {128, 128, 1};
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyNotFound();
// with offset, too big for buffer
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImageToBuffer-pRegions-00183");
region.bufferOffset = 16;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
region.bufferOffset = 0;
// extents that are not a multiple of compressed block size
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00207"); // extent width not a multiple of block size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
region.imageExtent.width = 66;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
region.imageExtent.width = 128;
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkBufferImageCopy-imageExtent-00208"); // extent height not a multiple of block size
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImageToBuffer-imageOffset-01794"); // image transfer granularity
region.imageExtent.height = 2;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
region.imageExtent.height = 128;
// TODO: All available compressed formats are 2D, with block depth of 1. Unable to provoke VU_01277.
// non-multiple extents are allowed if at the far edge of a non-block-multiple image - these should pass
m_errorMonitor->ExpectSuccess();
region.imageExtent.width = 66;
region.imageOffset.x = 64;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
region.imageExtent.width = 16;
region.imageOffset.x = 0;
region.imageExtent.height = 2;
region.imageOffset.y = 128;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_NPOT_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyNotFound();
region.imageOffset = {0, 0, 0};
// buffer offset must be a multiple of texel block size (16)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00206");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193");
region.imageExtent = {64, 64, 1};
region.bufferOffset = 24;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_16k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
// rowlength not a multiple of block width (4)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00203");
region.bufferOffset = 0;
region.bufferRowLength = 130;
region.bufferImageHeight = 0;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
// imageheight not a multiple of block height (4)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00204");
region.bufferRowLength = 0;
region.bufferImageHeight = 130;
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image_16k_4x4comp.handle(), VK_IMAGE_LAYOUT_GENERAL, buffer_64k.handle(),
1, &region);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, MiscImageLayerTests) {
TEST_DESCRIPTION("Image-related tests that don't belong elsewhere");
ASSERT_NO_FATAL_FAILURE(Init());
// TODO: Ideally we should check that the format is supported before using it.
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); // 64bpp
ASSERT_TRUE(image.initialized());
VkBufferObj buffer;
VkMemoryPropertyFlags reqs = 0;
buffer.init_as_src(*m_device, 128 * 128 * 8, reqs);
VkBufferImageCopy region = {};
region.bufferRowLength = 128;
region.bufferImageHeight = 128;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// layerCount must be non-zero (set to 1 here)
region.imageSubresource.layerCount = 1;
region.imageExtent.height = 4;
region.imageExtent.width = 4;
region.imageExtent.depth = 1;
VkImageObj image2(m_device);
image2.Init(128, 128, 1, VK_FORMAT_R8G8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0); // 16bpp
ASSERT_TRUE(image2.initialized());
VkBufferObj buffer2;
VkMemoryPropertyFlags reqs2 = 0;
buffer2.init_as_src(*m_device, 128 * 128 * 2, reqs2);
VkBufferImageCopy region2 = {};
region2.bufferRowLength = 128;
region2.bufferImageHeight = 128;
region2.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
// layerCount must be non-zero (set to 1 here)
region2.imageSubresource.layerCount = 1;
region2.imageExtent.height = 4;
region2.imageExtent.width = 4;
region2.imageExtent.depth = 1;
m_commandBuffer->begin();
// Image must have offset.z of 0 and extent.depth of 1
// Introduce failure by setting imageExtent.depth to 0
region.imageExtent.depth = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
region.imageExtent.depth = 1;
// Image must have offset.z of 0 and extent.depth of 1
// Introduce failure by setting imageOffset.z to 4
// Note: Also (unavoidably) triggers 'region exceeds image' #1228
region.imageOffset.z = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-srcImage-00201");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyBufferToImage-pRegions-00172");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
region.imageOffset.z = 0;
// BufferOffset must be a multiple of the calling command's VkImage parameter's texel size
// Introduce failure by setting bufferOffset to 1 and 1/2 texels
region.bufferOffset = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00193");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
// BufferOffset must be a multiple of 4
// Introduce failure by setting bufferOffset to a value not divisible by 4
region2.bufferOffset = 6;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferOffset-00194");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer2.handle(), image2.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region2);
m_errorMonitor->VerifyFound();
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
region.bufferOffset = 0;
region.imageExtent.height = 128;
region.imageExtent.width = 128;
// Introduce failure by setting bufferRowLength > 0 but less than width
region.bufferRowLength = 64;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferRowLength-00195");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
region.bufferRowLength = 128;
// Introduce failure by setting bufferRowHeight > 0 but less than height
region.bufferImageHeight = 64;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferImageCopy-bufferImageHeight-00196");
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer.handle(), image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
&region);
m_errorMonitor->VerifyFound();
region.bufferImageHeight = 128;
VkImageObj intImage1(m_device);
intImage1.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
intImage1.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageObj intImage2(m_device);
intImage2.Init(128, 128, 1, VK_FORMAT_R8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
intImage2.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_GENERAL);
VkImageBlit blitRegion = {};
blitRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.srcSubresource.baseArrayLayer = 0;
blitRegion.srcSubresource.layerCount = 1;
blitRegion.srcSubresource.mipLevel = 0;
blitRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
blitRegion.dstSubresource.baseArrayLayer = 0;
blitRegion.dstSubresource.layerCount = 1;
blitRegion.dstSubresource.mipLevel = 0;
blitRegion.srcOffsets[0] = {128, 0, 0};
blitRegion.srcOffsets[1] = {128, 128, 1};
blitRegion.dstOffsets[0] = {0, 128, 0};
blitRegion.dstOffsets[1] = {128, 128, 1};
// Look for NULL-blit warning
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
"vkCmdBlitImage: pRegions[0].srcOffsets specify a zero-volume area.");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
"vkCmdBlitImage: pRegions[0].dstOffsets specify a zero-volume area.");
vkCmdBlitImage(m_commandBuffer->handle(), intImage1.handle(), intImage1.Layout(), intImage2.handle(), intImage2.Layout(), 1,
&blitRegion, VK_FILTER_LINEAR);
m_errorMonitor->VerifyFound();
}
VkResult GPDIFPHelper(VkPhysicalDevice dev, const VkImageCreateInfo *ci, VkImageFormatProperties *limits = nullptr) {
VkImageFormatProperties tmp_limits;
limits = limits ? limits : &tmp_limits;
return vkGetPhysicalDeviceImageFormatProperties(dev, ci->format, ci->imageType, ci->tiling, ci->usage, ci->flags, limits);
}
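// GPDIFPHelper is a thin wrapper over vkGetPhysicalDeviceImageFormatProperties; callers that only
// care about the VkResult can omit the last argument, as the tests below do, e.g.:
//   ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci));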
TEST_F(VkLayerTest, CreateImageMiscErrors) {
TEST_DESCRIPTION("Misc leftover valid usage errors in VkImageCreateInfo struct");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
VkImage null_image; // throwaway target for all the vkCreateImage
VkImageCreateInfo tmp_img_ci = {};
tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
tmp_img_ci.flags = 0; // presumably any is supported
tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported
tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages
tmp_img_ci.extent = {64, 64, 1}; // limit is 256 for 3D, or 4096
tmp_img_ci.mipLevels = 1; // any is supported
tmp_img_ci.arrayLayers = 1; // limit is 256
tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR
// if VK_IMAGE_TILING_LINEAR, imageType must be 2D, usage must be TRANSFER, and levels, layers, and samples must all be 1
tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format
tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
const VkImageCreateInfo safe_image_ci = tmp_img_ci;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci));
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT;
image_ci.queueFamilyIndexCount = 2;
image_ci.pQueueFamilyIndices = nullptr;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00941");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.sharingMode = VK_SHARING_MODE_CONCURRENT;
image_ci.queueFamilyIndexCount = 1;
const uint32_t queue_family = 0;
image_ci.pQueueFamilyIndices = &queue_family;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-sharingMode-00942");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.format = VK_FORMAT_UNDEFINED;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00943");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
image_ci.arrayLayers = 6;
image_ci.imageType = VK_IMAGE_TYPE_1D;
image_ci.extent = {64, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci = safe_image_ci;
image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
image_ci.imageType = VK_IMAGE_TYPE_3D;
image_ci.extent = {4, 4, 4};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00949");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support
image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
image_ci.imageType = VK_IMAGE_TYPE_3D;
image_ci.extent = {4, 4, 4};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support
image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
image_ci.arrayLayers = 6;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support
image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
image_ci.tiling = VK_IMAGE_TILING_LINEAR;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // always has 4 samples support
image_ci.samples = VK_SAMPLE_COUNT_4_BIT;
image_ci.mipLevels = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00962");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci.usage = VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
image_ci.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00963");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00966");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-flags-00969");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
// initialLayout must be VK_IMAGE_LAYOUT_UNDEFINED or VK_IMAGE_LAYOUT_PREINITIALIZED
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-initialLayout-00993");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, CreateImageMinLimitsViolation) {
TEST_DESCRIPTION("Create invalid image with invalid parameters violation minimum limit, such as being zero.");
ASSERT_NO_FATAL_FAILURE(Init());
VkImage null_image; // throwaway target for all the vkCreateImage
VkImageCreateInfo tmp_img_ci = {};
tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
tmp_img_ci.flags = 0; // presumably any is supported
tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported
tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages
tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096
tmp_img_ci.mipLevels = 1; // any is supported
tmp_img_ci.arrayLayers = 1; // limit is 256
tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR
// if VK_IMAGE_TILING_LINEAR, imageType must be 2D, usage must be TRANSFER, and levels, layers, and samples must all be 1
tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format
tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
const VkImageCreateInfo safe_image_ci = tmp_img_ci;
enum Dimension { kWidth = 0x1, kHeight = 0x2, kDepth = 0x4 };
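// The loop below walks bad_dimensions through 0x1..0x7, i.e. every non-empty subset of
// {width, height, depth}, zeroing the selected dimensions and expecting the matching extent VUIDs.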
for (underlying_type<Dimension>::type bad_dimensions = 0x1; bad_dimensions < 0x8; ++bad_dimensions) {
VkExtent3D extent = {1, 1, 1};
if (bad_dimensions & kWidth) {
extent.width = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00944");
}
if (bad_dimensions & kHeight) {
extent.height = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00945");
}
if (bad_dimensions & kDepth) {
extent.depth = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00946");
}
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.imageType = VK_IMAGE_TYPE_3D; // must be 3D; for 1D/2D images a zero height/depth would trigger the "extent must be 1" error instead
bad_image_ci.extent = extent;
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.mipLevels = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00947");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.arrayLayers = 0;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00948");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
bad_image_ci.arrayLayers = 5;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
bad_image_ci.arrayLayers = 6;
bad_image_ci.extent = {64, 63, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00954");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.imageType = VK_IMAGE_TYPE_1D;
bad_image_ci.extent = {64, 2, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
bad_image_ci.imageType = VK_IMAGE_TYPE_1D;
bad_image_ci.extent = {64, 1, 2};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00956");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
bad_image_ci.imageType = VK_IMAGE_TYPE_2D;
bad_image_ci.extent = {64, 64, 2};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
bad_image_ci.imageType = VK_IMAGE_TYPE_2D;
bad_image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
bad_image_ci.arrayLayers = 6;
bad_image_ci.extent = {64, 64, 2};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00957");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
{
VkImageCreateInfo bad_image_ci = safe_image_ci;
bad_image_ci.imageType = VK_IMAGE_TYPE_3D;
bad_image_ci.arrayLayers = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00961");
vkCreateImage(m_device->device(), &bad_image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
}
}
VkFormat FindFormatLinearWithoutMips(VkPhysicalDevice gpu, VkImageCreateInfo image_ci) {
image_ci.tiling = VK_IMAGE_TILING_LINEAR;
const VkFormat first_vk_format = static_cast<VkFormat>(1);
const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184
for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
image_ci.format = format;
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
const VkFormatFeatureFlags core_filter = 0x1FFF;
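// 0x1FFF masks to the 13 VkFormatFeatureFlagBits defined in core Vulkan 1.0, ignoring extension bits.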
const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
: format_props.optimalTilingFeatures & core_filter;
if (!(features & core_filter)) continue;
VkImageFormatProperties img_limits;
if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && img_limits.maxMipLevels == 1) return format;
}
return VK_FORMAT_UNDEFINED;
}
bool FindFormatWithoutSamples(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) {
const VkFormat first_vk_format = static_cast<VkFormat>(1);
const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184
for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
image_ci.format = format;
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
const VkFormatFeatureFlags core_filter = 0x1FFF;
const auto features = (image_ci.tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
: format_props.optimalTilingFeatures & core_filter;
if (!(features & core_filter)) continue;
for (VkSampleCountFlagBits samples = VK_SAMPLE_COUNT_64_BIT; samples > 0;
samples = static_cast<VkSampleCountFlagBits>(samples >> 1)) {
image_ci.samples = samples;
VkImageFormatProperties img_limits;
if (VK_SUCCESS == GPDIFPHelper(gpu, &image_ci, &img_limits) && !(img_limits.sampleCounts & samples)) return true;
}
}
return false;
}
TEST_F(VkLayerTest, CreateImageMaxLimitsViolation) {
TEST_DESCRIPTION("Create invalid image with invalid parameters exceeding physical device limits.");
ASSERT_NO_FATAL_FAILURE(Init());
VkImage null_image; // throwaway target for all the vkCreateImage
VkImageCreateInfo tmp_img_ci = {};
tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
tmp_img_ci.flags = 0; // presumably any is supported
tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported
tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages
tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096
tmp_img_ci.mipLevels = 1; // any is supported
tmp_img_ci.arrayLayers = 1; // limit is 256
tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR
// if VK_IMAGE_TILING_LINEAR, imageType must be 2D, usage must be TRANSFER, and levels, layers, and samples must all be 1
tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format
tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
const VkImageCreateInfo safe_image_ci = tmp_img_ci;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci));
const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits;
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.imageType = VK_IMAGE_TYPE_1D;
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension1D);
if (img_limits.maxExtent.width != UINT32_MAX) {
image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00951");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 1D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.imageType = VK_IMAGE_TYPE_2D;
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension2D);
img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimension2D);
if (img_limits.maxExtent.width != UINT32_MAX) {
image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 2D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
if (img_limits.maxExtent.height != UINT32_MAX) {
image_ci.extent = {1, img_limits.maxExtent.height + 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 2D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
image_ci.arrayLayers = 6;
image_ci.imageType = VK_IMAGE_TYPE_2D;
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimensionCube);
img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimensionCube);
if (img_limits.maxExtent.width != UINT32_MAX || img_limits.maxExtent.height != UINT32_MAX) {
image_ci.extent = {img_limits.maxExtent.width + 1, img_limits.maxExtent.height + 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00953");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s CUBE VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.imageType = VK_IMAGE_TYPE_3D;
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
// WORKAROUND for dev_sim and mock_icd not containing valid format limits yet
img_limits.maxExtent.width = std::max(img_limits.maxExtent.width, dev_limits.maxImageDimension3D);
img_limits.maxExtent.height = std::max(img_limits.maxExtent.height, dev_limits.maxImageDimension3D);
img_limits.maxExtent.depth = std::max(img_limits.maxExtent.depth, dev_limits.maxImageDimension3D);
if (img_limits.maxExtent.width != UINT32_MAX) {
image_ci.extent = {img_limits.maxExtent.width + 1, 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
if (img_limits.maxExtent.height != UINT32_MAX) {
image_ci.extent = {1, img_limits.maxExtent.height + 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
if (img_limits.maxExtent.depth != UINT32_MAX) {
image_ci.extent = {1, 1, img_limits.maxExtent.depth + 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00955");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s 3D VkImageFormatProperties::maxExtent is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.extent = {8, 8, 1};
image_ci.mipLevels = 4 + 1; // 4 = log2(8) + 1
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
image_ci.extent = {8, 15, 1};
image_ci.mipLevels = 4 + 1; // 4 = floor(log2(15)) + 1
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-mipLevels-00958");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
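// In both cases the limit being exceeded is floor(log2(max(width, height, depth))) + 1, the
// length of a complete mip chain, per VUID-VkImageCreateInfo-mipLevels-00958.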
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.tiling = VK_IMAGE_TILING_LINEAR;
image_ci.extent = {64, 64, 1};
image_ci.format = FindFormatLinearWithoutMips(gpu(), image_ci);
image_ci.mipLevels = 2;
if (image_ci.format != VK_FORMAT_UNDEFINED) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-extent-00959");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s Cannot find a format to test maxMipLevels limit; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
if (img_limits.maxArrayLayers != UINT32_MAX) {
image_ci.arrayLayers = img_limits.maxArrayLayers + 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-arrayLayers-00960");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s VkImageFormatProperties::maxArrayLayers is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
bool found = FindFormatWithoutSamples(gpu(), image_ci);
if (found) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-samples-00967");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s Could not find a format with some unsupported samples; skipping part of test.\n", kSkipPrefix);
}
}
{
VkImageCreateInfo image_ci = safe_image_ci;
image_ci.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; // (any attachment bit)
VkImageFormatProperties img_limits;
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &image_ci, &img_limits));
if (dev_limits.maxFramebufferWidth != UINT32_MAX) {
image_ci.extent = {dev_limits.maxFramebufferWidth + 1, 64, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00964");
if (image_ci.extent.width > img_limits.maxExtent.width) { // might also trip image limits VU
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952");
}
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s VkPhysicalDeviceLimits::maxFramebufferWidth is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
if (dev_limits.maxFramebufferHeight != UINT32_MAX) {
image_ci.usage = VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT; // try different one too
image_ci.extent = {64, dev_limits.maxFramebufferHeight + 1, 1};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-usage-00965");
if (image_ci.extent.height > img_limits.maxExtent.height) { // might also trip image limits VU
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-imageType-00952");
}
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s VkPhysicalDeviceLimits::maxFramebufferHeight is already UINT32_MAX; skipping part of test.\n", kSkipPrefix);
}
}
}
bool FindUnsupportedImage(VkPhysicalDevice gpu, VkImageCreateInfo &image_ci) {
const VkFormat first_vk_format = static_cast<VkFormat>(1);
const VkFormat last_vk_format = static_cast<VkFormat>(130); // avoid compressed/feature protected, otherwise 184
const std::vector<VkImageTiling> tilings = {VK_IMAGE_TILING_LINEAR, VK_IMAGE_TILING_OPTIMAL};
for (const auto tiling : tilings) {
image_ci.tiling = tiling;
for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
image_ci.format = format;
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
const VkFormatFeatureFlags core_filter = 0x1FFF;
const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
: format_props.optimalTilingFeatures & core_filter;
if (!(features & core_filter)) continue; // We want a format supported by its format features but rejected by GetPhysicalDeviceImageFormatProperties
// get as many usage flags as possible
image_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
if (features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_SAMPLED_BIT;
if (features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT) image_ci.usage |= VK_IMAGE_USAGE_STORAGE_BIT;
if (features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) image_ci.usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if (features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)
image_ci.usage |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
VkImageFormatProperties img_limits;
if (VK_ERROR_FORMAT_NOT_SUPPORTED == GPDIFPHelper(gpu, &image_ci, &img_limits)) {
return true;
}
}
}
return false;
}
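// Helper: returns the first core format whose features for the given tiling are either completely
// absent (when undesired_features is left at the default UINT32_MAX) or exclude every bit in
// undesired_features while still having some core support; returns VK_FORMAT_UNDEFINED if none is found.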
VkFormat FindFormatWithoutFeatures(VkPhysicalDevice gpu, VkImageTiling tiling,
VkFormatFeatureFlags undesired_features = UINT32_MAX) {
const VkFormat first_vk_format = static_cast<VkFormat>(1);
const VkFormat last_vk_format = static_cast<VkFormat>(130); // stop before compressed/feature-protected formats; the full core range ends at 184
for (VkFormat format = first_vk_format; format <= last_vk_format; format = static_cast<VkFormat>(format + 1)) {
VkFormatProperties format_props;
vkGetPhysicalDeviceFormatProperties(gpu, format, &format_props);
const VkFormatFeatureFlags core_filter = 0x1FFF;
const auto features = (tiling == VK_IMAGE_TILING_LINEAR) ? format_props.linearTilingFeatures & core_filter
: format_props.optimalTilingFeatures & core_filter;
const auto valid_features = features & core_filter;
if (undesired_features == UINT32_MAX) {
if (!valid_features) return format;
} else {
if (valid_features && !(valid_features & undesired_features)) return format;
}
}
return VK_FORMAT_UNDEFINED;
}
TEST_F(VkLayerTest, CreateImageFormatSupportErrors) {
TEST_DESCRIPTION("Valid usage errors of format support in VkImageCreateInfo struct");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
VkImage null_image; // throwaway target for all the vkCreateImage calls
VkImageCreateInfo tmp_img_ci = {};
tmp_img_ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
tmp_img_ci.flags = 0; // presumably any is supported
tmp_img_ci.imageType = VK_IMAGE_TYPE_2D; // any is supported
tmp_img_ci.format = VK_FORMAT_R8G8B8A8_UNORM; // has mandatory support for all usages
tmp_img_ci.extent = {1, 1, 1}; // limit is 256 for 3D, or 4096
tmp_img_ci.mipLevels = 1; // any is supported
tmp_img_ci.arrayLayers = 1; // limit is 256
tmp_img_ci.samples = VK_SAMPLE_COUNT_1_BIT; // needs to be 1 if TILING_LINEAR
// if VK_IMAGE_TILING_LINEAR, imageType must be 2D, usage must be TRANSFER, and levels, layers, and samples must all be 1
tmp_img_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
tmp_img_ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT; // depends on format
tmp_img_ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
const VkImageCreateInfo safe_image_ci = tmp_img_ci;
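// Confirm the unmodified baseline create info really is supported before deriving the failure cases below.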
ASSERT_VK_SUCCESS(GPDIFPHelper(gpu(), &safe_image_ci));
{
VkImageCreateInfo image_ci = safe_image_ci;
bool found = FindUnsupportedImage(gpu(), image_ci);
if (found) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCreateInfo-format-00940");
vkCreateImage(m_device->handle(), &image_ci, NULL, &null_image);
m_errorMonitor->VerifyFound();
} else {
printf("%s Failed to find image unsupported by vkGetPhysicalDeviceImageFormatProperties; skipping test.\n",
kSkipPrefix);
}
}
}
TEST_F(VkLayerTest, CopyImageTypeExtentMismatch) {
// Image copy tests where format type and extents don't match
ASSERT_NO_FATAL_FAILURE(Init());
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_1D;
ci.format = VK_FORMAT_R8G8B8A8_UNORM;
ci.extent = {32, 1, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Create 1D image
VkImageObj image_1D(m_device);
image_1D.init(&ci);
ASSERT_TRUE(image_1D.initialized());
// 2D image
ci.imageType = VK_IMAGE_TYPE_2D;
ci.extent = {32, 32, 1};
VkImageObj image_2D(m_device);
image_2D.init(&ci);
ASSERT_TRUE(image_2D.initialized());
// 3D image
ci.imageType = VK_IMAGE_TYPE_3D;
ci.extent = {32, 32, 8};
VkImageObj image_3D(m_device);
image_3D.init(&ci);
ASSERT_TRUE(image_3D.initialized());
// 2D image array
ci.imageType = VK_IMAGE_TYPE_2D;
ci.extent = {32, 32, 1};
ci.arrayLayers = 8;
VkImageObj image_2D_array(m_device);
image_2D_array.init(&ci);
ASSERT_TRUE(image_2D_array.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {32, 1, 1};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
// Sanity check
m_errorMonitor->ExpectSuccess();
m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyNotFound();
// 1D texture w/ offset.y > 0. Source = VU 09c00124, dest = 09c00130
copy_region.srcOffset.y = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun
m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset.y = 0;
copy_region.dstOffset.y = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun
m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset.y = 0;
// 1D texture w/ extent.height > 1. Source = VU 09c00124, dest = 09c00130
copy_region.extent.height = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00146");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145"); // also y-dim overrun
m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-00152");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151"); // also y-dim overrun
m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.extent.height = 1;
// 1D texture w/ offset.z > 0. Source = VU 09c00df2, dest = 09c00df4
copy_region.srcOffset.z = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun
m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset.z = 0;
copy_region.dstOffset.z = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun
m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset.z = 0;
// 1D texture w/ extent.depth > 1. Source = VU 09c00df2, dest = 09c00df4
copy_region.extent.depth = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01785");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst)
m_commandBuffer->CopyImage(image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01786");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst)
m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_1D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.extent.depth = 1;
// 2D texture w/ offset.z > 0. Source = VU 09c00df6, dest = 09c00df8
copy_region.extent = {16, 16, 1};
copy_region.srcOffset.z = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01787");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-srcOffset-00147"); // also z-dim overrun (src)
m_commandBuffer->CopyImage(image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset.z = 0;
copy_region.dstOffset.z = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01788");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkImageCopy-dstOffset-00153"); // also z-dim overrun (dst)
m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset.z = 0;
// 3D texture accessing an array layer other than 0. VU 09c0011a
copy_region.extent = {4, 4, 1};
copy_region.srcSubresource.baseArrayLayer = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-00141");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcSubresource-01698"); // also 'too many layers'
m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageTypeExtentMismatchMaintenance1) {
// Image copy tests where format type and extents don't match and the Maintenance1 extension is enabled
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
} else {
printf("%s Maintenance1 extension cannot be enabled, test skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkFormat image_format = VK_FORMAT_R8G8B8A8_UNORM;
VkFormatProperties format_props;
// TODO: Remove this check if or when devsim handles extensions.
// The chosen format has mandatory support for the transfer src and dst format features when Maintenance1 is enabled. However,
// our use of devsim and the mock ICD violates this guarantee.
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_format, &format_props);
if (!(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_TRANSFER_SRC_BIT)) {
printf("%s Maintenance1 extension is not supported.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_1D;
ci.format = image_format;
ci.extent = {32, 1, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Create 1D image
VkImageObj image_1D(m_device);
image_1D.init(&ci);
ASSERT_TRUE(image_1D.initialized());
// 2D image
ci.imageType = VK_IMAGE_TYPE_2D;
ci.extent = {32, 32, 1};
VkImageObj image_2D(m_device);
image_2D.init(&ci);
ASSERT_TRUE(image_2D.initialized());
// 3D image
ci.imageType = VK_IMAGE_TYPE_3D;
ci.extent = {32, 32, 8};
VkImageObj image_3D(m_device);
image_3D.init(&ci);
ASSERT_TRUE(image_3D.initialized());
// 2D image array
ci.imageType = VK_IMAGE_TYPE_2D;
ci.extent = {32, 32, 1};
ci.arrayLayers = 8;
VkImageObj image_2D_array(m_device);
image_2D_array.init(&ci);
ASSERT_TRUE(image_2D_array.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {32, 1, 1};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
// Copy from layer not present
copy_region.srcSubresource.baseArrayLayer = 4;
copy_region.srcSubresource.layerCount = 6;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcSubresource-01698");
m_commandBuffer->CopyImage(image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
// Copy to layer not present
copy_region.dstSubresource.baseArrayLayer = 1;
copy_region.dstSubresource.layerCount = 8;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-dstSubresource-01699");
m_commandBuffer->CopyImage(image_3D.image(), VK_IMAGE_LAYOUT_GENERAL, image_2D_array.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstSubresource.layerCount = 1;
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageCompressedBlockAlignment) {
// Image copy tests on compressed images with block alignment errors
ASSERT_NO_FATAL_FAILURE(Init());
// Select a compressed format and verify support
VkPhysicalDeviceFeatures device_features = {};
ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
VkFormat compressed_format = VK_FORMAT_UNDEFINED;
if (device_features.textureCompressionBC) {
compressed_format = VK_FORMAT_BC3_SRGB_BLOCK;
} else if (device_features.textureCompressionETC2) {
compressed_format = VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK;
} else if (device_features.textureCompressionASTC_LDR) {
compressed_format = VK_FORMAT_ASTC_4x4_UNORM_BLOCK;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = compressed_format;
ci.extent = {64, 64, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageFormatProperties img_prop = {};
if (VK_SUCCESS != vkGetPhysicalDeviceImageFormatProperties(m_device->phy().handle(), ci.format, ci.imageType, ci.tiling,
ci.usage, ci.flags, &img_prop)) {
printf("%s No compressed formats supported - CopyImageCompressedBlockAlignment skipped.\n", kSkipPrefix);
return;
}
// Create images
VkImageObj image_1(m_device);
image_1.init(&ci);
ASSERT_TRUE(image_1.initialized());
ci.extent = {62, 62, 1}; // slightly smaller and not divisible by block size
VkImageObj image_2(m_device);
image_2.init(&ci);
ASSERT_TRUE(image_2.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {48, 48, 1};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
// Sanity check
m_errorMonitor->ExpectSuccess();
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyNotFound();
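// The expected VUIDs differ when VK_KHR_sampler_ycbcr_conversion is enabled (or the device is Vulkan 1.1),
// so select the appropriate strings at runtime below.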
std::string vuid;
bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
(m_device->props.apiVersion >= VK_API_VERSION_1_1));
// Src, Dest offsets must be multiples of compressed block sizes {4, 4, 1}
// Image transfer granularity gets set to compressed block size, so an ITG error is also (unavoidably) triggered.
vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01727" : "VUID-VkImageCopy-srcOffset-00157";
copy_region.srcOffset = {2, 4, 0}; // source width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset = {12, 1, 0}; // source height
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // srcOffset image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset = {0, 0, 0};
vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01731" : "VUID-VkImageCopy-dstOffset-00162";
copy_region.dstOffset = {1, 0, 0}; // dest width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset = {4, 1, 0}; // dest height
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-dstOffset-01784"); // dstOffset image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset = {0, 0, 0};
// Copy extent must be multiples of compressed block sizes {4, 4, 1} if not full width/height
vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01728" : "VUID-VkImageCopy-extent-00158";
copy_region.extent = {62, 60, 1}; // source width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
vuid = ycbcr ? "VUID-VkImageCopy-srcImage-01729" : "VUID-VkImageCopy-extent-00159";
copy_region.extent = {60, 62, 1}; // source height
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-srcOffset-01783"); // src extent image transfer granularity
m_commandBuffer->CopyImage(image_1.image(), VK_IMAGE_LAYOUT_GENERAL, image_2.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01732" : "VUID-VkImageCopy-extent-00163";
copy_region.extent = {62, 60, 1}; // dest width
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity
m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
vuid = ycbcr ? "VUID-VkImageCopy-dstImage-01733" : "VUID-VkImageCopy-extent-00164";
copy_region.extent = {60, 62, 1}; // dest height
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-dstOffset-01784"); // dst extent image transfer granularity
m_commandBuffer->CopyImage(image_2.image(), VK_IMAGE_LAYOUT_GENERAL, image_1.image(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyFound();
// Note: "VUID-VkImageCopy-extent-00160", "VUID-VkImageCopy-extent-00165", "VUID-VkImageCopy-srcImage-01730",
// "VUID-VkImageCopy-dstImage-01734"
// There are currently no supported compressed formats with a block depth other than 1,
// so it is not possible to create a 'not a multiple of block depth' condition.
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageSinglePlane422Alignment) {
// Image copy tests on single-plane _422 formats with block alignment errors
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Select a _422 format and verify support
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8B8G8R8_422_UNORM_KHR;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify formats
VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
if (!supported) {
printf("%s Single-plane _422 image format not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
// Create images
ci.extent = {64, 64, 1};
VkImageObj image_422(m_device);
image_422.init(&ci);
ASSERT_TRUE(image_422.initialized());
ci.extent = {64, 64, 1};
ci.format = VK_FORMAT_R8G8B8A8_UNORM;
VkImageObj image_ucmp(m_device);
image_ucmp.init(&ci);
ASSERT_TRUE(image_ucmp.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {48, 48, 1};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
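// Single-plane _422 formats use a 2x1 texel block, so x offsets and extents must be even unless they cover the full width.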
// Src offsets must be multiples of compressed block sizes
copy_region.srcOffset = {3, 4, 0}; // source offset x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01727");
m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcOffset = {0, 0, 0};
// Dst offsets must be multiples of compressed block sizes
copy_region.dstOffset = {1, 0, 0};
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01731");
m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset = {0, 0, 0};
// Copy extent must be multiples of compressed block sizes if not full width/height
copy_region.extent = {31, 60, 1}; // 422 source, extent.x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01728");
m_commandBuffer->CopyImage(image_422.image(), VK_IMAGE_LAYOUT_GENERAL, image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
// 422 dest, extent.x
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01732");
m_commandBuffer->CopyImage(image_ucmp.image(), VK_IMAGE_LAYOUT_GENERAL, image_422.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstOffset = {0, 0, 0};
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageMultiplaneAspectBits) {
// Image copy tests on multiplane images with aspect errors
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Select multi-plane formats and verify support
VkFormat mp3_format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR;
VkFormat mp2_format = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR;
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = mp2_format;
ci.extent = {256, 256, 1};
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify formats
VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
ci.format = mp3_format;
supported = supported && ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
if (!supported) {
printf("%s Multiplane image formats not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
// Create images
VkImageObj mp3_image(m_device);
mp3_image.init(&ci);
ASSERT_TRUE(mp3_image.initialized());
ci.format = mp2_format;
VkImageObj mp2_image(m_device);
mp2_image.init(&ci);
ASSERT_TRUE(mp2_image.initialized());
ci.format = VK_FORMAT_D24_UNORM_S8_UINT;
VkImageObj sp_image(m_device);
sp_image.init(&ci);
ASSERT_TRUE(sp_image.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {128, 128, 1};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
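// Multi-planar copies must address exactly one valid plane: 2-plane formats allow PLANE_0/PLANE_1,
// 3-plane formats allow PLANE_0/PLANE_1/PLANE_2 (VUIDs 01552-01555 below).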
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01552");
m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01553");
m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01554");
m_commandBuffer->CopyImage(mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01555");
m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcImage-01556");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also
m_commandBuffer->CopyImage(mp2_image.image(), VK_IMAGE_LAYOUT_GENERAL, sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstImage-01557");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "dest image depth/stencil formats"); // also
m_commandBuffer->CopyImage(sp_image.image(), VK_IMAGE_LAYOUT_GENERAL, mp3_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageSrcSizeExceeded) {
// Image copy with a source region that exceeds the source image extent
ASSERT_NO_FATAL_FAILURE(Init());
// Create images with full mip chain
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_3D;
ci.format = VK_FORMAT_R8G8B8A8_UNORM;
ci.extent = {32, 32, 8};
ci.mipLevels = 6;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj src_image(m_device);
src_image.init(&ci);
ASSERT_TRUE(src_image.initialized());
// Dest image with one more mip level
ci.extent = {64, 64, 16};
ci.mipLevels = 7;
ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
VkImageObj dst_image(m_device);
dst_image.init(&ci);
ASSERT_TRUE(dst_image.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {32, 32, 8};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
m_errorMonitor->ExpectSuccess();
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyNotFound();
// Source exceeded in x-dim, VU 01202
copy_region.srcOffset.x = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-pRegions-00122"); // General "contained within" VU
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00144");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
// Source exceeded in y-dim, VU 01203
copy_region.srcOffset.x = 0;
copy_region.extent.height = 48;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00145");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
// Source exceeded in z-dim, VU 01204
copy_region.extent = {4, 4, 4};
copy_region.srcSubresource.mipLevel = 2;
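// At mip level 2 the 32x32x8 source is 8x8x2, so a copy depth of 4 overruns the z-dimension.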
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00122");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-srcOffset-00147");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageDstSizeExceeded) {
// Image copy with a dest region that exceeds the dest image extent
ASSERT_NO_FATAL_FAILURE(Init());
// Create images with full mip chain
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_3D;
ci.format = VK_FORMAT_R8G8B8A8_UNORM;
ci.extent = {32, 32, 8};
ci.mipLevels = 6;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj dst_image(m_device);
dst_image.init(&ci);
ASSERT_TRUE(dst_image.initialized());
// Src image with one more mip level
ci.extent = {64, 64, 16};
ci.mipLevels = 7;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
VkImageObj src_image(m_device);
src_image.init(&ci);
ASSERT_TRUE(src_image.initialized());
m_commandBuffer->begin();
VkImageCopy copy_region;
copy_region.extent = {32, 32, 8};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
m_errorMonitor->ExpectSuccess();
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyNotFound();
// Dest exceeded in x-dim, VU 01205
copy_region.dstOffset.x = 4;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdCopyImage-pRegions-00123"); // General "contained within" VU
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00150");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
// Dest exceeded in y-dim, VU 01206
copy_region.dstOffset.x = 0;
copy_region.extent.height = 48;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00151");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
// Dest exceeded in z-dim, VU 01207
copy_region.extent = {4, 4, 4};
copy_region.dstSubresource.mipLevel = 2;
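// At mip level 2 the 32x32x8 dest is 8x8x2, so a copy depth of 4 overruns the z-dimension.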
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-pRegions-00123");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-dstOffset-00153");
m_commandBuffer->CopyImage(src_image.image(), VK_IMAGE_LAYOUT_GENERAL, dst_image.image(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copy_region);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageFormatSizeMismatch) {
VkResult err;
bool pass;
// Create color images with different format sizes and try to copy between them
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00135");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images whose formats have different texel sizes and try to copy between them
VkImage srcImage;
VkImage dstImage;
VkDeviceMemory srcMem;
VkDeviceMemory destMem;
VkMemoryRequirements memReqs;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage);
ASSERT_VK_SUCCESS(err);
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
// Introduce failure by creating second image with a different-sized format.
image_create_info.format = VK_FORMAT_R5G5B5A1_UNORM_PACK16;
VkFormatProperties properties;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), image_create_info.format, &properties);
if (properties.optimalTilingFeatures == 0) {
vkDestroyImage(m_device->device(), srcImage, NULL);
printf("%s Image format not supported; skipped.\n", kSkipPrefix);
return;
}
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage);
ASSERT_VK_SUCCESS(err);
// Allocate memory
VkMemoryAllocateInfo memAlloc = {};
memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memAlloc.pNext = NULL;
memAlloc.allocationSize = 0;
memAlloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset.x = 0;
copyRegion.srcOffset.y = 0;
copyRegion.srcOffset.z = 0;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset.x = 0;
copyRegion.dstOffset.y = 0;
copyRegion.dstOffset.z = 0;
copyRegion.extent.width = 1;
copyRegion.extent.height = 1;
copyRegion.extent.depth = 1;
m_commandBuffer->CopyImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), srcImage, NULL);
vkDestroyImage(m_device->device(), dstImage, NULL);
vkFreeMemory(m_device->device(), srcMem, NULL);
vkFreeMemory(m_device->device(), destMem, NULL);
}
TEST_F(VkLayerTest, CopyImageDepthStencilFormatMismatch) {
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s Couldn't depth stencil image format.\n", kSkipPrefix);
return;
}
VkFormatProperties properties;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties);
if (properties.optimalTilingFeatures == 0) {
printf("%s Image format not supported; skipped.\n", kSkipPrefix);
return;
}
VkImageObj srcImage(m_device);
srcImage.Init(32, 32, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(srcImage.initialized());
VkImageObj dstImage(m_device);
dstImage.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(dstImage.initialized());
// Try to copy between images with mismatched depth/stencil formats
m_commandBuffer->begin();
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset.x = 0;
copyRegion.srcOffset.y = 0;
copyRegion.srcOffset.z = 0;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset.x = 0;
copyRegion.dstOffset.y = 0;
copyRegion.dstOffset.z = 0;
copyRegion.extent.width = 1;
copyRegion.extent.height = 1;
copyRegion.extent.depth = 1;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdCopyImage called with unmatched source and dest image depth");
m_commandBuffer->CopyImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CopyImageSampleCountMismatch) {
TEST_DESCRIPTION("Image copies with sample count mis-matches");
ASSERT_NO_FATAL_FAILURE(Init());
VkImageFormatProperties image_format_properties;
vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0,
&image_format_properties);
if ((0 == (VK_SAMPLE_COUNT_2_BIT & image_format_properties.sampleCounts)) ||
(0 == (VK_SAMPLE_COUNT_4_BIT & image_format_properties.sampleCounts))) {
printf("%s Image multi-sample support not found; skipped.\n", kSkipPrefix);
return;
}
VkImageCreateInfo ci;
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_R8G8B8A8_UNORM;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.queueFamilyIndexCount = 0;
ci.pQueueFamilyIndices = NULL;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VkImageObj image1(m_device);
image1.init(&ci);
ASSERT_TRUE(image1.initialized());
ci.samples = VK_SAMPLE_COUNT_2_BIT;
VkImageObj image2(m_device);
image2.init(&ci);
ASSERT_TRUE(image2.initialized());
ci.samples = VK_SAMPLE_COUNT_4_BIT;
VkImageObj image4(m_device);
image4.init(&ci);
ASSERT_TRUE(image4.initialized());
m_commandBuffer->begin();
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset = {0, 0, 0};
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset = {0, 0, 0};
copyRegion.extent = {128, 128, 1};
// Copy a single sample image to/from a multi-sample image
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
vkCmdCopyImage(m_commandBuffer->handle(), image1.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image1.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_errorMonitor->VerifyFound();
// Copy between multi-sample images with different sample counts
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
vkCmdCopyImage(m_commandBuffer->handle(), image2.handle(), VK_IMAGE_LAYOUT_GENERAL, image4.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdCopyImage-srcImage-00136");
vkCmdCopyImage(m_commandBuffer->handle(), image4.handle(), VK_IMAGE_LAYOUT_GENERAL, image2.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&copyRegion);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, CopyImageAspectMismatch) {
TEST_DESCRIPTION("Image copies with aspect mask errors");
ASSERT_NO_FATAL_FAILURE(Init());
auto ds_format = FindSupportedDepthStencilFormat(gpu());
if (!ds_format) {
printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
return;
}
VkFormatProperties properties;
vkGetPhysicalDeviceFormatProperties(m_device->phy().handle(), VK_FORMAT_D32_SFLOAT, &properties);
if (properties.optimalTilingFeatures == 0) {
printf("%s Image format VK_FORMAT_D32_SFLOAT not supported; skipped.\n", kSkipPrefix);
return;
}
VkImageObj color_image(m_device), ds_image(m_device), depth_image(m_device);
color_image.Init(128, 128, 1, VK_FORMAT_R32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
depth_image.Init(128, 128, 1, VK_FORMAT_D32_SFLOAT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ds_image.Init(128, 128, 1, ds_format, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(color_image.initialized());
ASSERT_TRUE(depth_image.initialized());
ASSERT_TRUE(ds_image.initialized());
VkImageCopy copyRegion;
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset = {0, 0, 0};
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset = {64, 0, 0};
copyRegion.extent = {64, 128, 1};
// Submitting command before command buffer is in recording state
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"You must call vkBeginCommandBuffer"); // "VUID-vkCmdCopyImage-commandBuffer-recording");
vkCmdCopyImage(m_commandBuffer->handle(), depth_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
m_commandBuffer->begin();
// Src and dest aspect masks don't match
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
bool ycbcr = (DeviceExtensionEnabled(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME) ||
(m_device->props.apiVersion >= VK_API_VERSION_1_1));
std::string vuid = (ycbcr ? "VUID-VkImageCopy-srcImage-01551" : "VUID-VkImageCopy-aspectMask-00137");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
vkCmdCopyImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, ds_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
// Illegal combinations of aspect bits
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167");
// These aspect/format mismatches are redundant but unavoidable here
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// same test for dstSubresource
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT; // color must be alone
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00167");
// These aspect/format mismatches are redundant but unavoidable here
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// Metadata aspect is illegal
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168");
// These aspect/format mismatches are redundant but unavoidable here
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// same test for dstSubresource
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_METADATA_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageSubresourceLayers-aspectMask-00168");
// These aspect/format mismatches are redundant but unavoidable here
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, color_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
// Aspect mask doesn't match source image format
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00142");
// Again redundant but unavoidable
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and dest image depth/stencil formats");
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
// Aspect mask doesn't match dest image format
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkImageCopy-aspectMask-00143");
// Again redundant but unavoidable
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "unmatched source and dest image depth/stencil formats");
vkCmdCopyImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_GENERAL, depth_image.handle(),
VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ResolveImageLowSampleCount) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdResolveImage called with source sample count less than 2.");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images of sample count 1 and try to Resolve between them
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 1;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
image_create_info.flags = 0;
VkImageObj srcImage(m_device);
srcImage.init(&image_create_info);
ASSERT_TRUE(srcImage.initialized());
VkImageObj dstImage(m_device);
dstImage.init(&image_create_info);
ASSERT_TRUE(dstImage.initialized());
m_commandBuffer->begin();
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
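// Both images were created with VK_SAMPLE_COUNT_1_BIT, so this resolve should trigger the low-sample-count error set above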
m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&resolveRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ResolveImageHighSampleCount) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdResolveImage called with dest sample count greater than 1.");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images of sample count 4 and try to Resolve between them
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 1;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_4_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.usage =
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.flags = 0;
VkImageObj srcImage(m_device);
srcImage.init(&image_create_info);
ASSERT_TRUE(srcImage.initialized());
VkImageObj dstImage(m_device);
dstImage.init(&image_create_info);
ASSERT_TRUE(dstImage.initialized());
m_commandBuffer->begin();
// TODO: Determine whether an explicit transition to VK_IMAGE_LAYOUT_GENERAL is needed for source and dest
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
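// The destination image was created with VK_SAMPLE_COUNT_4_BIT, so this resolve should trigger the high-sample-count error set above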
m_commandBuffer->ResolveImage(srcImage.handle(), VK_IMAGE_LAYOUT_GENERAL, dstImage.handle(), VK_IMAGE_LAYOUT_GENERAL, 1,
&resolveRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ResolveImageFormatMismatch) {
VkResult err;
bool pass;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
"vkCmdResolveImage called with unmatched source and dest formats.");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images with different formats and try to resolve between them
VkImage srcImage;
VkImage dstImage;
VkDeviceMemory srcMem;
VkDeviceMemory destMem;
VkMemoryRequirements memReqs;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 1;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage);
ASSERT_VK_SUCCESS(err);
// Set format to something other than source image
image_create_info.format = VK_FORMAT_R32_SFLOAT;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage);
ASSERT_VK_SUCCESS(err);
// Allocate memory
VkMemoryAllocateInfo memAlloc = {};
memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memAlloc.pNext = NULL;
memAlloc.allocationSize = 0;
memAlloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
// TODO: Determine whether an explicit transition to VK_IMAGE_LAYOUT_GENERAL is needed for source and dest
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
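// srcImage (B8G8R8A8_UNORM) and dstImage (R32_SFLOAT) have different formats, so this resolve should trigger the format-mismatch warning set above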
m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), srcImage, NULL);
vkDestroyImage(m_device->device(), dstImage, NULL);
vkFreeMemory(m_device->device(), srcMem, NULL);
vkFreeMemory(m_device->device(), destMem, NULL);
}
TEST_F(VkLayerTest, ResolveImageTypeMismatch) {
VkResult err;
bool pass;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_WARNING_BIT_EXT,
"vkCmdResolveImage called with unmatched source and dest image types.");
ASSERT_NO_FATAL_FAILURE(Init());
// Create two images of different image types and try to resolve between them
VkImage srcImage;
VkImage dstImage;
VkDeviceMemory srcMem;
VkDeviceMemory destMem;
VkMemoryRequirements memReqs;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 1;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &srcImage);
ASSERT_VK_SUCCESS(err);
image_create_info.imageType = VK_IMAGE_TYPE_1D;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &dstImage);
ASSERT_VK_SUCCESS(err);
// Allocate memory
VkMemoryAllocateInfo memAlloc = {};
memAlloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memAlloc.pNext = NULL;
memAlloc.allocationSize = 0;
memAlloc.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), srcImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &srcMem);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), dstImage, &memReqs);
memAlloc.allocationSize = memReqs.size;
pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &memAlloc, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memAlloc, NULL, &destMem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), srcImage, srcMem, 0);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), dstImage, destMem, 0);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
// TODO: Determine whether an explicit transition to VK_IMAGE_LAYOUT_GENERAL is needed for source and dest
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
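// srcImage is 2D while dstImage is 1D, so this resolve should trigger the type-mismatch warning set above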
m_commandBuffer->ResolveImage(srcImage, VK_IMAGE_LAYOUT_GENERAL, dstImage, VK_IMAGE_LAYOUT_GENERAL, 1, &resolveRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), srcImage, NULL);
vkDestroyImage(m_device->device(), dstImage, NULL);
vkFreeMemory(m_device->device(), srcMem, NULL);
vkFreeMemory(m_device->device(), destMem, NULL);
}
TEST_F(VkLayerTest, ResolveImageLayoutMismatch) {
ASSERT_NO_FATAL_FAILURE(Init());
// Create a multisample source and a single-sample destination image and resolve between them using the wrong layouts
VkImageObj srcImage(m_device);
VkImageObj dstImage(m_device);
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage =
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.flags = 0;
srcImage.init(&image_create_info);
ASSERT_TRUE(srcImage.initialized());
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
dstImage.init(&image_create_info);
ASSERT_TRUE(dstImage.initialized());
m_commandBuffer->begin();
// source image must have valid contents before resolve
VkClearColorValue clear_color = {{0, 0, 0, 0}};
VkImageSubresourceRange subresource = {};
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresource.layerCount = 1;
subresource.levelCount = 1;
srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource);
srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
// source image layout mismatch
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcImageLayout-00260");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_GENERAL, dstImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &resolveRegion);
m_errorMonitor->VerifyFound();
// dst image layout mismatch
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstImageLayout-00262");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(), VK_IMAGE_LAYOUT_GENERAL,
1, &resolveRegion);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ResolveInvalidSubresource) {
ASSERT_NO_FATAL_FAILURE(Init());
// Create a multisample source and a single-sample destination image to resolve between
VkImageObj srcImage(m_device);
VkImageObj dstImage(m_device);
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 32;
image_create_info.extent.height = 32;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_2_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage =
VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.flags = 0;
srcImage.init(&image_create_info);
ASSERT_TRUE(srcImage.initialized());
// Note: Some implementations expect color attachment usage for any
// multisample surface
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
dstImage.init(&image_create_info);
ASSERT_TRUE(dstImage.initialized());
m_commandBuffer->begin();
// source image must have valid contents before resolve
VkClearColorValue clear_color = {{0, 0, 0, 0}};
VkImageSubresourceRange subresource = {};
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresource.layerCount = 1;
subresource.levelCount = 1;
srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
m_commandBuffer->ClearColorImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_color, 1, &subresource);
srcImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
dstImage.SetLayout(m_commandBuffer, VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
VkImageResolve resolveRegion;
resolveRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.srcSubresource.mipLevel = 0;
resolveRegion.srcSubresource.baseArrayLayer = 0;
resolveRegion.srcSubresource.layerCount = 1;
resolveRegion.srcOffset.x = 0;
resolveRegion.srcOffset.y = 0;
resolveRegion.srcOffset.z = 0;
resolveRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
resolveRegion.dstSubresource.mipLevel = 0;
resolveRegion.dstSubresource.baseArrayLayer = 0;
resolveRegion.dstSubresource.layerCount = 1;
resolveRegion.dstOffset.x = 0;
resolveRegion.dstOffset.y = 0;
resolveRegion.dstOffset.z = 0;
resolveRegion.extent.width = 1;
resolveRegion.extent.height = 1;
resolveRegion.extent.depth = 1;
// invalid source mip level
resolveRegion.srcSubresource.mipLevel = image_create_info.mipLevels;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01709");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
m_errorMonitor->VerifyFound();
resolveRegion.srcSubresource.mipLevel = 0;
// invalid dest mip level
resolveRegion.dstSubresource.mipLevel = image_create_info.mipLevels;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01710");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
m_errorMonitor->VerifyFound();
resolveRegion.dstSubresource.mipLevel = 0;
// invalid source array layer range
resolveRegion.srcSubresource.baseArrayLayer = image_create_info.arrayLayers;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-srcSubresource-01711");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
m_errorMonitor->VerifyFound();
resolveRegion.srcSubresource.baseArrayLayer = 0;
// invalid dest array layer range
resolveRegion.dstSubresource.baseArrayLayer = image_create_info.arrayLayers;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdResolveImage-dstSubresource-01712");
m_commandBuffer->ResolveImage(srcImage.image(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage.image(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &resolveRegion);
m_errorMonitor->VerifyFound();
resolveRegion.dstSubresource.baseArrayLayer = 0;
m_commandBuffer->end();
}
TEST_F(VkLayerTest, DepthStencilImageViewWithColorAspectBitError) {
// Create a single image descriptor and cause it first to hit an error from using a
// depth/stencil format, then to hit an error because COLOR_BIT is not the only bit
// set in the aspect mask.
// The image format check comes second in validation, so we trigger it first; when
// the aspect failure is caused next, the bad-format check is preempted.
VkResult err;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"Combination depth/stencil image formats can have only the ");
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
return;
}
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptorSet;
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
VkImage image_bad;
VkImage image_good;
// One bad format and one good format for Color attachment
const VkFormat tex_format_bad = depth_format;
const VkFormat tex_format_good = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t tex_width = 32;
const int32_t tex_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = tex_format_bad;
image_create_info.extent.width = tex_width;
image_create_info.extent.height = tex_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
image_create_info.flags = 0;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_bad);
ASSERT_VK_SUCCESS(err);
image_create_info.format = tex_format_good;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image_good);
ASSERT_VK_SUCCESS(err);
// ---Bind image memory---
VkMemoryRequirements img_mem_reqs;
vkGetImageMemoryRequirements(m_device->device(), image_bad, &img_mem_reqs);
VkMemoryAllocateInfo image_alloc_info = {};
image_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
image_alloc_info.pNext = NULL;
image_alloc_info.memoryTypeIndex = 0;
image_alloc_info.allocationSize = img_mem_reqs.size;
bool pass =
m_device->phy().set_memory_type(img_mem_reqs.memoryTypeBits, &image_alloc_info, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
ASSERT_TRUE(pass);
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &image_alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindImageMemory(m_device->device(), image_bad, mem, 0);
ASSERT_VK_SUCCESS(err);
// -----------------------
VkImageViewCreateInfo image_view_create_info = {};
image_view_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
image_view_create_info.image = image_bad;
image_view_create_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
image_view_create_info.format = tex_format_bad;
image_view_create_info.subresourceRange.baseArrayLayer = 0;
image_view_create_info.subresourceRange.baseMipLevel = 0;
image_view_create_info.subresourceRange.layerCount = 1;
image_view_create_info.subresourceRange.levelCount = 1;
image_view_create_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT;
VkImageView view;
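// The aspect mask combines COLOR and DEPTH on a depth/stencil-format view, which should trigger the aspect-mask error set up above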
err = vkCreateImageView(m_device->device(), &image_view_create_info, NULL, &view);
m_errorMonitor->VerifyFound();
vkDestroyImage(m_device->device(), image_bad, NULL);
vkDestroyImage(m_device->device(), image_good, NULL);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
}
TEST_F(VkLayerTest, ClearImageErrors) {
TEST_DESCRIPTION("Call ClearColorImage w/ a depth|stencil image and ClearDepthStencilImage with a color image.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
// Color image
VkClearColorValue clear_color;
memset(clear_color.uint32, 0, sizeof(uint32_t) * 4);
VkMemoryPropertyFlags reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
const VkFormat color_format = VK_FORMAT_B8G8R8A8_UNORM;
const int32_t img_width = 32;
const int32_t img_height = 32;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = color_format;
image_create_info.extent.width = img_width;
image_create_info.extent.height = img_height;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_LINEAR;
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
vk_testing::Image color_image_no_transfer;
color_image_no_transfer.init(*m_device, image_create_info, reqs);
image_create_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image color_image;
color_image.init(*m_device, image_create_info, reqs);
const VkImageSubresourceRange color_range = vk_testing::Image::subresource_range(image_create_info, VK_IMAGE_ASPECT_COLOR_BIT);
// Depth/Stencil image
VkClearDepthStencilValue clear_value = {0};
reqs = 0; // don't need HOST_VISIBLE DS image
VkImageCreateInfo ds_image_create_info = vk_testing::Image::create_info();
ds_image_create_info.imageType = VK_IMAGE_TYPE_2D;
ds_image_create_info.format = VK_FORMAT_D16_UNORM;
ds_image_create_info.extent.width = 64;
ds_image_create_info.extent.height = 64;
ds_image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
ds_image_create_info.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image ds_image;
ds_image.init(*m_device, ds_image_create_info, reqs);
const VkImageSubresourceRange ds_range = vk_testing::Image::subresource_range(ds_image_create_info, VK_IMAGE_ASPECT_DEPTH_BIT);
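// Call CmdClearColorImage with a depth/stencil image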
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "vkCmdClearColorImage called with depth/stencil image.");
vkCmdClearColorImage(m_commandBuffer->handle(), ds_image.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1, &color_range);
m_errorMonitor->VerifyFound();
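// Call CmdClearColorImage with an image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT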
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdClearColorImage called with image created without VK_IMAGE_USAGE_TRANSFER_DST_BIT");
vkCmdClearColorImage(m_commandBuffer->handle(), color_image_no_transfer.handle(), VK_IMAGE_LAYOUT_GENERAL, &clear_color, 1,
&color_range);
m_errorMonitor->VerifyFound();
// Call CmdClearDepthStencilImage with color image
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"vkCmdClearDepthStencilImage called without a depth/stencil image.");
vkCmdClearDepthStencilImage(m_commandBuffer->handle(), color_image.handle(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clear_value,
1, &ds_range);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CommandQueueFlags) {
TEST_DESCRIPTION(
"Allocate a command buffer on a queue that does not support graphics and try to issue a graphics-only command");
ASSERT_NO_FATAL_FAILURE(Init());
uint32_t queueFamilyIndex = m_device->QueueFamilyWithoutCapabilities(VK_QUEUE_GRAPHICS_BIT);
if (queueFamilyIndex == UINT32_MAX) {
printf("%s Non-graphics queue family not found; skipped.\n", kSkipPrefix);
return;
} else {
// Create command pool on a non-graphics queue
VkCommandPoolObj command_pool(m_device, queueFamilyIndex);
// Setup command buffer on pool
VkCommandBufferObj command_buffer(m_device, &command_pool);
command_buffer.begin();
// Issue a graphics only command
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-commandBuffer-cmdpool");
VkViewport viewport = {0, 0, 16, 16, 0, 1};
command_buffer.SetViewport(0, 1, &viewport);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, ExecuteUnrecordedSecondaryCB) {
TEST_DESCRIPTION("Attempt vkCmdExecuteCommands with a CB in the initial state");
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandBufferObj secondary(m_device, m_commandPool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
// never record secondary
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdExecuteCommands-pCommandBuffers-00089");
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary.handle());
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, ExecuteUnrecordedPrimaryCB) {
TEST_DESCRIPTION("Attempt vkQueueSubmit with a CB in the initial state");
ASSERT_NO_FATAL_FAILURE(Init());
// never record m_commandBuffer
VkSubmitInfo si = {};
si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
si.commandBufferCount = 1;
si.pCommandBuffers = &m_commandBuffer->handle();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkQueueSubmit-pCommandBuffers-00072");
vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, ExtensionNotEnabled) {
TEST_DESCRIPTION("Validate that using an API from an unenabled extension returns an error");
// Do NOT enable VK_KHR_maintenance1
ASSERT_NO_FATAL_FAILURE(Init());
// TODO: Maintenance1 is ALWAYS enabled in 1.1. Re-write the test with an extension that is present in both 1.0 and 1.1
if (m_device->props.apiVersion >= VK_API_VERSION_1_1) {
printf("%s Device has apiVersion greater than 1.0 -- skipping extension enabled check.\n", kSkipPrefix);
return;
}
// Find address of extension API
PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR =
(PFN_vkTrimCommandPoolKHR)vkGetDeviceProcAddr(m_device->handle(), "vkTrimCommandPoolKHR");
if (vkTrimCommandPoolKHR == nullptr) {
printf("%s Maintenance1 not supported by device; skipped.\n", kSkipPrefix);
return;
}
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"but its required extension VK_KHR_maintenance1 has not been enabled");
vkTrimCommandPoolKHR(m_device->handle(), m_commandPool->handle(), (VkCommandPoolTrimFlags)0);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, Maintenance1AndNegativeViewport) {
TEST_DESCRIPTION("Attempt to enable AMD_negative_viewport_height and Maintenance1_KHR extension simultaneously");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (!((DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) &&
(DeviceExtensionSupported(gpu(), nullptr, VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME)))) {
printf("%s Maintenance1 and AMD_negative viewport height extensions not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
vk_testing::QueueCreateInfoArray queue_info(m_device->queue_props);
const char *extension_names[2] = {"VK_KHR_maintenance1", "VK_AMD_negative_viewport_height"};
VkDevice testDevice;
VkDeviceCreateInfo device_create_info = {};
auto features = m_device->phy().features();
device_create_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_create_info.pNext = NULL;
device_create_info.queueCreateInfoCount = queue_info.size();
device_create_info.pQueueCreateInfos = queue_info.data();
device_create_info.enabledLayerCount = 0;
device_create_info.ppEnabledLayerNames = NULL;
device_create_info.enabledExtensionCount = 2;
device_create_info.ppEnabledExtensionNames = (const char *const *)extension_names;
device_create_info.pEnabledFeatures = &features;
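// Enabling VK_KHR_maintenance1 and VK_AMD_negative_viewport_height together is disallowed,
// so device creation is expected to fail with the VUID below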
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDeviceCreateInfo-ppEnabledExtensionNames-00374");
// The following unexpected error is coming from the LunarG loader. Do not make it a desired message because platforms that do
// not use the LunarG loader (e.g. Android) will not see the message and the test will fail.
m_errorMonitor->SetUnexpectedError("Failed to create device chain.");
vkCreateDevice(gpu(), &device_create_info, NULL, &testDevice);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidCreateDescriptorPool) {
TEST_DESCRIPTION("Attempt to create descriptor pool with invalid parameters");
ASSERT_NO_FATAL_FAILURE(Init());
const uint32_t default_descriptor_count = 1;
const VkDescriptorPoolSize dp_size_template{VK_DESCRIPTOR_TYPE_SAMPLER, default_descriptor_count};
const VkDescriptorPoolCreateInfo dp_ci_template{VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
nullptr, // pNext
0, // flags
1, // maxSets
1, // poolSizeCount
&dp_size_template};
// try maxSets = 0
{
VkDescriptorPoolCreateInfo invalid_dp_ci = dp_ci_template;
invalid_dp_ci.maxSets = 0; // invalid maxSets value
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolCreateInfo-maxSets-00301");
{
VkDescriptorPool pool;
vkCreateDescriptorPool(m_device->device(), &invalid_dp_ci, nullptr, &pool);
}
m_errorMonitor->VerifyFound();
}
// try descriptorCount = 0
{
VkDescriptorPoolSize invalid_dp_size = dp_size_template;
invalid_dp_size.descriptorCount = 0; // invalid descriptorCount value
VkDescriptorPoolCreateInfo dp_ci = dp_ci_template;
dp_ci.pPoolSizes = &invalid_dp_size;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorPoolSize-descriptorCount-00302");
{
VkDescriptorPool pool;
vkCreateDescriptorPool(m_device->device(), &dp_ci, nullptr, &pool);
}
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, InvalidCreateBufferSize) {
TEST_DESCRIPTION("Attempt to create VkBuffer with size of zero");
ASSERT_NO_FATAL_FAILURE(Init());
VkBufferCreateInfo info = {};
info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkBufferCreateInfo-size-00912");
info.size = 0;
VkBuffer buffer;
vkCreateBuffer(m_device->device(), &info, nullptr, &buffer);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SetDynViewportParamTests) {
TEST_DESCRIPTION("Test parameters of vkCmdSetViewport without multiViewport feature");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0};
const VkViewport viewports[] = {vp, vp};
m_commandBuffer->begin();
// array tests
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
vkCmdSetViewport(m_commandBuffer->handle(), 1, 1, viewports);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225");
vkCmdSetViewport(m_commandBuffer->handle(), 0, 2, viewports);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
vkCmdSetViewport(m_commandBuffer->handle(), 1, 0, viewports);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01224");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-01225");
vkCmdSetViewport(m_commandBuffer->handle(), 1, 2, viewports);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, nullptr);
m_errorMonitor->VerifyFound();
// core viewport tests
using std::vector;
struct TestCase {
VkViewport vp;
std::string veid;
};
// not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit
const auto one_past_max_w = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[0]));
const auto one_past_max_h = NearestGreater(static_cast<float>(m_device->props.limits.maxViewportDimensions[1]));
const auto min_bound = m_device->props.limits.viewportBoundsRange[0];
const auto max_bound = m_device->props.limits.viewportBoundsRange[1];
const auto one_before_min_bounds = NearestSmaller(min_bound);
const auto one_past_max_bounds = NearestGreater(max_bound);
const auto below_zero = NearestSmaller(0.0f);
const auto past_one = NearestGreater(1.0f);
vector<TestCase> test_cases = {
{{0.0, 0.0, 0.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"},
{{0.0, 0.0, one_past_max_w, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01771"},
{{0.0, 0.0, NAN, 64.0, 0.0, 1.0}, "VUID-VkViewport-width-01770"},
{{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, "VUID-VkViewport-height-01773"},
{{one_before_min_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"},
{{one_past_max_bounds, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"},
{{NAN, 0.0, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01774"},
{{0.0, one_before_min_bounds, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"},
{{0.0, NAN, 64.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-y-01775"},
{{max_bound, 0.0, 1.0, 64.0, 0.0, 1.0}, "VUID-VkViewport-x-01232"},
{{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, "VUID-VkViewport-y-01233"},
{{0.0, 0.0, 64.0, 64.0, below_zero, 1.0}, "VUID-VkViewport-minDepth-01234"},
{{0.0, 0.0, 64.0, 64.0, past_one, 1.0}, "VUID-VkViewport-minDepth-01234"},
{{0.0, 0.0, 64.0, 64.0, NAN, 1.0}, "VUID-VkViewport-minDepth-01234"},
{{0.0, 0.0, 64.0, 64.0, 0.0, below_zero}, "VUID-VkViewport-maxDepth-01235"},
{{0.0, 0.0, 64.0, 64.0, 0.0, past_one}, "VUID-VkViewport-maxDepth-01235"},
{{0.0, 0.0, 64.0, 64.0, 0.0, NAN}, "VUID-VkViewport-maxDepth-01235"},
};
if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
test_cases.push_back({{0.0, 0.0, 64.0, 0.0, 0.0, 1.0}, "VUID-VkViewport-height-01772"});
test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01772"});
} else {
test_cases.push_back({{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, "VUID-VkViewport-height-01773"});
}
for (const auto &test_case : test_cases) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.veid);
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp);
m_errorMonitor->VerifyFound();
}
}
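// Helper shared by the negative-viewport-height tests below; the parameter names
// intentionally mirror the VkLayerTest members they stand in for.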
void NegHeightViewportTests(VkDeviceObj *m_device, VkCommandBufferObj *m_commandBuffer, ErrorMonitor *m_errorMonitor) {
const auto &limits = m_device->props.limits;
m_commandBuffer->begin();
using std::vector;
struct TestCase {
VkViewport vp;
vector<std::string> vuids;
};
// not necessarily boundary values (unspecified cast rounding), but guaranteed to be over limit
const auto one_before_min_h = NearestSmaller(-static_cast<float>(limits.maxViewportDimensions[1]));
const auto one_past_max_h = NearestGreater(static_cast<float>(limits.maxViewportDimensions[1]));
const auto min_bound = limits.viewportBoundsRange[0];
const auto max_bound = limits.viewportBoundsRange[1];
const auto one_before_min_bound = NearestSmaller(min_bound);
const auto one_past_max_bound = NearestGreater(max_bound);
const vector<TestCase> test_cases = {{{0.0, 0.0, 64.0, one_before_min_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
{{0.0, 0.0, 64.0, one_past_max_h, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
{{0.0, 0.0, 64.0, NAN, 0.0, 1.0}, {"VUID-VkViewport-height-01773"}},
{{0.0, one_before_min_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01775"}},
{{0.0, one_past_max_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01776"}},
{{0.0, min_bound, 64.0, -1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01777"}},
{{0.0, max_bound, 64.0, 1.0, 0.0, 1.0}, {"VUID-VkViewport-y-01233"}}};
for (const auto &test_case : test_cases) {
for (const auto &vuid : test_case.vuids) {
if (vuid == "VUID-Undefined")
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"is less than VkPhysicalDeviceLimits::viewportBoundsRange[0]");
else
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, vuid);
}
vkCmdSetViewport(m_commandBuffer->handle(), 0, 1, &test_case.vp);
m_errorMonitor->VerifyFound();
}
}
TEST_F(VkLayerTest, SetDynViewportParamMaintenance1Tests) {
TEST_DESCRIPTION("Verify errors are detected on misuse of SetViewport with a negative viewport extension enabled.");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
} else {
printf("%s VK_KHR_maintenance1 extension not supported -- skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
NegHeightViewportTests(m_device, m_commandBuffer, m_errorMonitor);
}
TEST_F(VkLayerTest, SetDynViewportParamMultiviewportTests) {
TEST_DESCRIPTION("Test parameters of vkCmdSetViewport with multiViewport feature enabled");
ASSERT_NO_FATAL_FAILURE(Init());
if (!m_device->phy().features().multiViewport) {
printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix);
return;
}
const auto max_viewports = m_device->props.limits.maxViewports;
const uint32_t too_many_viewports = 65536 + 1; // let's say this is too much to allocate pViewports for
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
vkCmdSetViewport(m_commandBuffer->handle(), 0, 0, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-pViewports-parameter");
vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports, nullptr);
m_errorMonitor->VerifyFound();
if (max_viewports >= too_many_viewports) {
printf("%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping part of test.\n",
kSkipPrefix);
return;
}
const VkViewport vp = {0.0, 0.0, 64.0, 64.0, 0.0, 1.0};
const std::vector<VkViewport> viewports(max_viewports + 1, vp);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
vkCmdSetViewport(m_commandBuffer->handle(), 0, max_viewports + 1, viewports.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
vkCmdSetViewport(m_commandBuffer->handle(), max_viewports, 1, viewports.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
vkCmdSetViewport(m_commandBuffer->handle(), 1, max_viewports, viewports.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-viewportCount-arraylength");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetViewport-firstViewport-01223");
vkCmdSetViewport(m_commandBuffer->handle(), max_viewports + 1, 0, viewports.data());
m_errorMonitor->VerifyFound();
}
//
// POSITIVE VALIDATION TESTS
//
// These tests do not expect to encounter ANY validation errors, and pass only if that holds true
TEST_F(VkPositiveLayerTest, UncompressedToCompressedImageCopy) {
TEST_DESCRIPTION("Image copies between compressed and uncompressed images");
ASSERT_NO_FATAL_FAILURE(Init());
// Verify format support
// Size-compatible (64-bit) formats. Uncompressed is 64 bits per texel, compressed is 64 bits per 4x4 block (or 4bpt).
if (!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_R16G16B16A16_UINT, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR) ||
!ImageFormatAndFeaturesSupported(gpu(), VK_FORMAT_BC1_RGBA_SRGB_BLOCK, VK_IMAGE_TILING_OPTIMAL,
VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR | VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR)) {
printf("%s Required formats/features not supported - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
return;
}
VkImageObj uncomp_10x10t_image(m_device);       // Size = 10 * 10 texels * 64 bits = 6400 bits
VkImageObj comp_10x10b_40x40t_image(m_device);  // Size = 40 * 40 texels * 4 bits = 6400 bits
uncomp_10x10t_image.Init(10, 10, 1, VK_FORMAT_R16G16B16A16_UINT,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
comp_10x10b_40x40t_image.Init(40, 40, 1, VK_FORMAT_BC1_RGBA_SRGB_BLOCK,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
if (!uncomp_10x10t_image.initialized() || !comp_10x10b_40x40t_image.initialized()) {
printf("%s Unable to initialize surfaces - UncompressedToCompressedImageCopy skipped.\n", kSkipPrefix);
return;
}
// Both copies represent the same number of bits: 4 bits per texel for BC1, 64 bits per texel for the uncompressed format
// Copy compressed to uncompressed
VkImageCopy copy_region = {};
copy_region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
copy_region.srcSubresource.mipLevel = 0;
copy_region.dstSubresource.mipLevel = 0;
copy_region.srcSubresource.baseArrayLayer = 0;
copy_region.dstSubresource.baseArrayLayer = 0;
copy_region.srcSubresource.layerCount = 1;
copy_region.dstSubresource.layerCount = 1;
copy_region.srcOffset = {0, 0, 0};
copy_region.dstOffset = {0, 0, 0};
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
// Copy from uncompressed to compressed
copy_region.extent = {10, 10, 1}; // Dimensions in (uncompressed) texels
vkCmdCopyImage(m_commandBuffer->handle(), uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
// And from compressed to uncompressed
copy_region.extent = {40, 40, 1}; // Dimensions in (compressed) texels
vkCmdCopyImage(m_commandBuffer->handle(), comp_10x10b_40x40t_image.handle(), VK_IMAGE_LAYOUT_GENERAL,
uncomp_10x10t_image.handle(), VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->end();
}
TEST_F(VkPositiveLayerTest, DeleteDescriptorSetLayoutsBeforeDescriptorSets) {
TEST_DESCRIPTION("Create DSLayouts and DescriptorSets and then delete the DSLayouts before the DescriptorSets.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkResult err;
m_errorMonitor->ExpectSuccess();
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_SAMPLER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool_one;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool_one);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
VkDescriptorSet descriptorSet;
{
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool_one;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptorSet);
ASSERT_VK_SUCCESS(err);
} // ds_layout destroyed
err = vkFreeDescriptorSets(m_device->device(), ds_pool_one, 1, &descriptorSet);
vkDestroyDescriptorPool(m_device->device(), ds_pool_one, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CommandPoolDeleteWithReferences) {
TEST_DESCRIPTION("Ensure the validation layers bookkeeping tracks the implicit command buffer frees.");
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandPoolCreateInfo cmd_pool_info = {};
cmd_pool_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmd_pool_info.pNext = NULL;
cmd_pool_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
cmd_pool_info.flags = 0;
VkCommandPool secondary_cmd_pool;
VkResult res = vkCreateCommandPool(m_device->handle(), &cmd_pool_info, NULL, &secondary_cmd_pool);
ASSERT_VK_SUCCESS(res);
VkCommandBufferAllocateInfo cmdalloc = vk_testing::CommandBuffer::create_info(secondary_cmd_pool);
cmdalloc.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
VkCommandBuffer secondary_cmds;
res = vkAllocateCommandBuffers(m_device->handle(), &cmdalloc, &secondary_cmds);
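// Check the allocation result, matching the pattern used elsewhere in this file
ASSERT_VK_SUCCESS(res);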
VkCommandBufferInheritanceInfo cmd_buf_inheritance_info = {};
cmd_buf_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
cmd_buf_inheritance_info.pNext = NULL;
cmd_buf_inheritance_info.renderPass = VK_NULL_HANDLE;
cmd_buf_inheritance_info.subpass = 0;
cmd_buf_inheritance_info.framebuffer = VK_NULL_HANDLE;
cmd_buf_inheritance_info.occlusionQueryEnable = VK_FALSE;
cmd_buf_inheritance_info.queryFlags = 0;
cmd_buf_inheritance_info.pipelineStatistics = 0;
VkCommandBufferBeginInfo secondary_begin = {};
secondary_begin.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
secondary_begin.pNext = NULL;
secondary_begin.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
secondary_begin.pInheritanceInfo = &cmd_buf_inheritance_info;
res = vkBeginCommandBuffer(secondary_cmds, &secondary_begin);
ASSERT_VK_SUCCESS(res);
vkEndCommandBuffer(secondary_cmds);
m_commandBuffer->begin();
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_cmds);
m_commandBuffer->end();
// DestroyCommandPool *implicitly* frees the command buffers allocated from it
vkDestroyCommandPool(m_device->handle(), secondary_cmd_pool, NULL);
// If bookkeeping has been lax, validating the reset will attempt to touch deleted data
res = vkResetCommandPool(m_device->handle(), m_commandPool->handle(), 0);
ASSERT_VK_SUCCESS(res);
}
TEST_F(VkLayerTest, SecondaryCommandBufferClearColorAttachmentsRenderArea) {
TEST_DESCRIPTION(
"Create a secondary command buffer with CmdClearAttachments call that has a rect outside of renderPass renderArea");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = m_commandPool->handle();
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
command_buffer_allocate_info.commandBufferCount = 1;
VkCommandBuffer secondary_command_buffer;
ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));
VkCommandBufferBeginInfo command_buffer_begin_info = {};
VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
command_buffer_inheritance_info.renderPass = m_renderPass;
command_buffer_inheritance_info.framebuffer = m_framebuffer;
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags =
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;
vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 0;
color_attachment.clearValue.color.float32[1] = 0;
color_attachment.clearValue.color.float32[2] = 0;
color_attachment.clearValue.color.float32[3] = 0;
color_attachment.colorAttachment = 0;
// x extent of 257 exceeds render area of 256
VkClearRect clear_rect = {{{0, 0}, {257, 32}}};
vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect);
vkEndCommandBuffer(secondary_command_buffer);
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdClearAttachments-pRects-00016");
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer);
m_errorMonitor->VerifyFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferClearColorAttachments) {
TEST_DESCRIPTION("Create a secondary command buffer and record a CmdClearAttachments call into it");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = m_commandPool->handle();
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
command_buffer_allocate_info.commandBufferCount = 1;
VkCommandBuffer secondary_command_buffer;
ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));
VkCommandBufferBeginInfo command_buffer_begin_info = {};
VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
command_buffer_inheritance_info.renderPass = m_renderPass;
command_buffer_inheritance_info.framebuffer = m_framebuffer;
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags =
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT | VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;
vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
VkClearAttachment color_attachment;
color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
color_attachment.clearValue.color.float32[0] = 0;
color_attachment.clearValue.color.float32[1] = 0;
color_attachment.clearValue.color.float32[2] = 0;
color_attachment.clearValue.color.float32[3] = 0;
color_attachment.colorAttachment = 0;
VkClearRect clear_rect = {{{0, 0}, {32, 32}}};
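// Unlike the negative render-area test above, this 32x32 clear rect lies within the render area, so no error is expected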
vkCmdClearAttachments(secondary_command_buffer, 1, &color_attachment, 1, &clear_rect);
vkEndCommandBuffer(secondary_command_buffer);
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
vkCmdExecuteCommands(m_commandBuffer->handle(), 1, &secondary_command_buffer);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, SecondaryCommandBufferImageLayoutTransitions) {
TEST_DESCRIPTION("Perform an image layout transition in a secondary command buffer followed by a transition in the primary.");
VkResult err;
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s Couldn't find depth stencil format.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// Allocate a secondary and primary cmd buffer
VkCommandBufferAllocateInfo command_buffer_allocate_info = {};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = m_commandPool->handle();
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_SECONDARY;
command_buffer_allocate_info.commandBufferCount = 1;
VkCommandBuffer secondary_command_buffer;
ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &secondary_command_buffer));
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
VkCommandBuffer primary_command_buffer;
ASSERT_VK_SUCCESS(vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &primary_command_buffer));
VkCommandBufferBeginInfo command_buffer_begin_info = {};
VkCommandBufferInheritanceInfo command_buffer_inheritance_info = {};
command_buffer_inheritance_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
command_buffer_begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
command_buffer_begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
command_buffer_begin_info.pInheritanceInfo = &command_buffer_inheritance_info;
err = vkBeginCommandBuffer(secondary_command_buffer, &command_buffer_begin_info);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
image.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
img_barrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier.image = image.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vkCmdPipelineBarrier(secondary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr,
0, nullptr, 1, &img_barrier);
err = vkEndCommandBuffer(secondary_command_buffer);
ASSERT_VK_SUCCESS(err);
// Now record the primary command buffer: execute the secondary, then transition the image again
command_buffer_begin_info.pInheritanceInfo = nullptr;
err = vkBeginCommandBuffer(primary_command_buffer, &command_buffer_begin_info);
ASSERT_VK_SUCCESS(err);
vkCmdExecuteCommands(primary_command_buffer, 1, &secondary_command_buffer);
VkImageMemoryBarrier img_barrier2 = {};
img_barrier2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier2.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier2.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
img_barrier2.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier2.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
img_barrier2.image = image.handle();
img_barrier2.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier2.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier2.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
img_barrier2.subresourceRange.baseArrayLayer = 0;
img_barrier2.subresourceRange.baseMipLevel = 0;
img_barrier2.subresourceRange.layerCount = 1;
img_barrier2.subresourceRange.levelCount = 1;
vkCmdPipelineBarrier(primary_command_buffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0, nullptr, 0,
nullptr, 1, &img_barrier2);
err = vkEndCommandBuffer(primary_command_buffer);
ASSERT_VK_SUCCESS(err);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &primary_command_buffer;
err = vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
err = vkDeviceWaitIdle(m_device->device());
ASSERT_VK_SUCCESS(err);
vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &secondary_command_buffer);
vkFreeCommandBuffers(m_device->device(), m_commandPool->handle(), 1, &primary_command_buffer);
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, IgnoreUnrelatedDescriptor) {
TEST_DESCRIPTION(
"Ensure that the vkUpdateDescriptorSets validation code is ignoring VkWriteDescriptorSet members that are not related to "
"the descriptor type specified by VkWriteDescriptorSet::descriptorType. Correct validation behavior will result in the "
"test running to completion without validation errors.");
const uintptr_t invalid_ptr = 0xcdcdcdcd;
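// 0xcdcdcdcd acts as a poison pointer: dereferencing it would almost certainly crash, so running
// vkUpdateDescriptorSets to completion shows the unused pointer members really were ignored.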
ASSERT_NO_FATAL_FAILURE(Init());
// Image Case
{
m_errorMonitor->ExpectSuccess();
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
VkImageView view = image.targetView(VK_FORMAT_B8G8R8A8_UNORM);
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkDescriptorImageInfo image_info = {};
image_info.imageView = view;
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
descriptor_write.pImageInfo = &image_info;
// Set pBufferInfo and pTexelBufferView to invalid values, which should be ignored for
// descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE. This will most likely produce a crash
// if the parameter_validation layer does not correctly ignore pBufferInfo.
descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);
descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyNotFound();
}
// Buffer Case
{
m_errorMonitor->ExpectSuccess();
VkBuffer buffer;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory buffer_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkDescriptorBufferInfo buffer_info = {};
buffer_info.buffer = buffer;
buffer_info.offset = 0;
buffer_info.range = 1024;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.pBufferInfo = &buffer_info;
// Set pImageInfo and pTexelBufferView to invalid values, which should be ignored for
// descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER. This will most likely produce a crash
// if the parameter_validation layer does not correctly ignore pImageInfo.
descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
descriptor_write.pTexelBufferView = reinterpret_cast<const VkBufferView *>(invalid_ptr);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyNotFound();
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), buffer_memory, NULL);
}
// Texel Buffer Case
{
m_errorMonitor->ExpectSuccess();
VkBuffer buffer;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory buffer_memory;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
memory_info.allocationSize = memory_reqs.size;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
VkBufferViewCreateInfo buff_view_ci = {};
buff_view_ci.sType = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
buff_view_ci.buffer = buffer;
buff_view_ci.format = VK_FORMAT_R8_UNORM;
buff_view_ci.range = VK_WHOLE_SIZE;
VkBufferView buffer_view;
err = vkCreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view);
ASSERT_VK_SUCCESS(err);
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
descriptor_write.pTexelBufferView = &buffer_view;
// Set pImageInfo and pBufferInfo to invalid values, which should be ignored for
// descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER. This will most likely produce a
// crash if the parameter_validation layer does not correctly ignore pImageInfo and pBufferInfo.
descriptor_write.pImageInfo = reinterpret_cast<const VkDescriptorImageInfo *>(invalid_ptr);
descriptor_write.pBufferInfo = reinterpret_cast<const VkDescriptorBufferInfo *>(invalid_ptr);
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyNotFound();
vkDestroyBufferView(m_device->device(), buffer_view, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), buffer_memory, NULL);
}
}
TEST_F(VkPositiveLayerTest, ImmutableSamplerOnlyDescriptor) {
TEST_DESCRIPTION("Bind a DescriptorSet with only an immutable sampler and make sure that we don't warn for no update.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
VkSampler sampler;
VkResult err = vkCreateSampler(m_device->device(), &sampler_ci, NULL, &sampler);
ASSERT_VK_SUCCESS(err);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_, 0,
nullptr);
m_errorMonitor->VerifyNotFound();
vkDestroySampler(m_device->device(), sampler, NULL);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
}
TEST_F(VkLayerTest, DuplicateDescriptorBinding) {
TEST_DESCRIPTION("Create a descriptor set layout with a duplicate binding number.");
ASSERT_NO_FATAL_FAILURE(Init());
// Create layout where two binding #s are "1"
static const uint32_t NUM_BINDINGS = 3;
VkDescriptorSetLayoutBinding dsl_binding[NUM_BINDINGS] = {};
dsl_binding[0].binding = 1;
dsl_binding[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding[0].descriptorCount = 1;
dsl_binding[0].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding[0].pImmutableSamplers = NULL;
dsl_binding[1].binding = 0;
dsl_binding[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding[1].descriptorCount = 1;
dsl_binding[1].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding[1].pImmutableSamplers = NULL;
dsl_binding[2].binding = 1; // Duplicate binding should cause error
dsl_binding[2].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding[2].descriptorCount = 1;
dsl_binding[2].stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding[2].pImmutableSamplers = NULL;
VkDescriptorSetLayoutCreateInfo ds_layout_ci = {};
ds_layout_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
ds_layout_ci.pNext = NULL;
ds_layout_ci.bindingCount = NUM_BINDINGS;
ds_layout_ci.pBindings = dsl_binding;
VkDescriptorSetLayout ds_layout;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279");
vkCreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, InvalidPushDescriptorSetLayout) {
TEST_DESCRIPTION("Create a push descriptor set layout with invalid bindings.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Find address of extension call and make the call
PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
(PFN_vkGetPhysicalDeviceProperties2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
assert(vkGetPhysicalDeviceProperties2KHR != nullptr);
// Get the push descriptor limits
auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
vkGetPhysicalDeviceProperties2KHR(m_device->phy().handle(), &prop2);
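// The lvl_init_struct helper fills in the matching sType for each struct and chains push_descriptor_prop
// through prop2.pNext, so the query above returns the push descriptor limits.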
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &binding;
// Note that as binding is referenced in ds_layout_ci, it is effectively in the closure by reference as well.
auto test_create_ds_layout = [&ds_layout_ci, this](std::string error) {
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error);
vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
};
// Starting with the initial VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC descriptor type set above.
test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00280");
binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
test_create_ds_layout(
"VUID-VkDescriptorSetLayoutCreateInfo-flags-00280"); // This is the same VUID as above, just a second error condition.
if (!(push_descriptor_prop.maxPushDescriptors == std::numeric_limits<uint32_t>::max())) {
binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
binding.descriptorCount = push_descriptor_prop.maxPushDescriptors + 1;
test_create_ds_layout("VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
} else {
printf("%s maxPushDescriptors is set to maximum unit32_t value, skipping 'out of range test'.\n", kSkipPrefix);
}
}
TEST_F(VkLayerTest, PushDescriptorSetLayoutWithoutExtension) {
TEST_DESCRIPTION("Create a push descriptor set layout without loading the needed extension.");
ASSERT_NO_FATAL_FAILURE(Init());
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &binding;
std::string error = "Attempted to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR in ";
error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension ";
error = error + VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME;
error = error + " has not been enabled.";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str());
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281");
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}
TEST_F(VkLayerTest, DescriptorIndexingSetLayoutWithoutExtension) {
TEST_DESCRIPTION("Create an update_after_bind set layout without loading the needed extension.");
ASSERT_NO_FATAL_FAILURE(Init());
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
std::string error = "Attemped to use VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT in ";
error = error + "VkDescriptorSetLayoutCreateInfo::flags but its required extension ";
error = error + VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME;
error = error + " has not been enabled.";
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, error.c_str());
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}
TEST_F(VkLayerTest, DescriptorIndexingSetLayout) {
TEST_DESCRIPTION("Exercise various create/allocate-time errors related to VK_EXT_descriptor_indexing.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
std::array<const char *, 2> required_device_extensions = {
{VK_KHR_MAINTENANCE3_EXTENSION_NAME, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME}};
for (auto device_extension : required_device_extensions) {
if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
m_device_extension_names.push_back(device_extension);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
return;
}
}
// Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind
auto indexingFeatures = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexingFeatures);
vkGetPhysicalDeviceFeatures2(gpu(), &features2);
indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE;
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
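// A binding flagged with UPDATE_AFTER_BIND_BIT requires both the set layout's UPDATE_AFTER_BIND_POOL flag and
// the matching per-type feature (here descriptorBindingUniformBufferUpdateAfterBind, disabled above); the VUIDs
// exercised below cover both requirements.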
VkDescriptorBindingFlagsEXT flags = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT;
auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>();
flags_create_info.bindingCount = 1;
flags_create_info.pBindingFlags = &flags;
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info);
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &binding;
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
// VU for VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount
flags_create_info.bindingCount = 2;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002");
VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
flags_create_info.bindingCount = 1;
// set is missing UPDATE_AFTER_BIND_POOL flag.
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000");
// binding uses a feature we disabled
m_errorMonitor->SetDesiredFailureMsg(
VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUniformBufferUpdateAfterBind-03005");
err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
ds_layout_ci.bindingCount = 0;
flags_create_info.bindingCount = 0;
err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
ASSERT_VK_SUCCESS(err);
VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount};
auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
dspci.poolSizeCount = 1;
dspci.pPoolSizes = &pool_size;
dspci.maxSets = 1;
VkDescriptorPool pool;
err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
ASSERT_VK_SUCCESS(err);
auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>();
ds_alloc_info.descriptorPool = pool;
ds_alloc_info.descriptorSetCount = 1;
ds_alloc_info.pSetLayouts = &ds_layout;
VkDescriptorSet ds = VK_NULL_HANDLE;
// mismatch between descriptor set and pool
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044");
vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
vkDestroyDescriptorPool(m_device->handle(), pool, nullptr);
if (indexingFeatures.descriptorBindingVariableDescriptorCount) {
ds_layout_ci.flags = 0;
ds_layout_ci.bindingCount = 1;
flags_create_info.bindingCount = 1;
flags = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT;
err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
ASSERT_VK_SUCCESS(err);
pool_size = {binding.descriptorType, binding.descriptorCount};
dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
dspci.poolSizeCount = 1;
dspci.pPoolSizes = &pool_size;
dspci.maxSets = 1;
err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
ASSERT_VK_SUCCESS(err);
auto count_alloc_info = lvl_init_struct<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>();
count_alloc_info.descriptorSetCount = 1;
// Set variable count larger than what was in the descriptor binding
uint32_t variable_count = 2;
count_alloc_info.pDescriptorCounts = &variable_count;
ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>(&count_alloc_info);
ds_alloc_info.descriptorPool = pool;
ds_alloc_info.descriptorSetCount = 1;
ds_alloc_info.pSetLayouts = &ds_layout;
ds = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046");
vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
vkDestroyDescriptorPool(m_device->handle(), pool, nullptr);
}
}
TEST_F(VkLayerTest, DescriptorIndexingUpdateAfterBind) {
TEST_DESCRIPTION("Exercise errors for updating a descriptor set after it is bound.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME) &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE3_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_MAINTENANCE3_EXTENSION_NAME);
} else {
printf("%s Descriptor Indexing or Maintenance3 Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
// Create a device that enables all supported indexing features except descriptorBindingUniformBufferUpdateAfterBind
auto indexingFeatures = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>();
auto features2 = lvl_init_struct<VkPhysicalDeviceFeatures2KHR>(&indexingFeatures);
vkGetPhysicalDeviceFeatures2(gpu(), &features2);
indexingFeatures.descriptorBindingUniformBufferUpdateAfterBind = VK_FALSE;
if (VK_FALSE == indexingFeatures.descriptorBindingStorageBufferUpdateAfterBind) {
printf("%s Test requires (unsupported) descriptorBindingStorageBufferUpdateAfterBind, skipping\n", kSkipPrefix);
return;
}
if (VK_FALSE == features2.features.fragmentStoresAndAtomics) {
printf("%s Test requires (unsupported) fragmentStoresAndAtomics, skipping\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorBindingFlagsEXT flags[2] = {0, VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT};
auto flags_create_info = lvl_init_struct<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>();
flags_create_info.bindingCount = 2;
flags_create_info.pBindingFlags = &flags[0];
// Descriptor set has two bindings - only the second is update_after_bind
VkDescriptorSetLayoutBinding binding[2] = {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
};
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>(&flags_create_info);
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
ds_layout_ci.bindingCount = 2;
ds_layout_ci.pBindings = &binding[0];
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
ASSERT_VK_SUCCESS(err);
VkDescriptorPoolSize pool_sizes[2] = {
{binding[0].descriptorType, binding[0].descriptorCount},
{binding[1].descriptorType, binding[1].descriptorCount},
};
auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
dspci.flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT;
dspci.poolSizeCount = 2;
dspci.pPoolSizes = &pool_sizes[0];
dspci.maxSets = 1;
VkDescriptorPool pool;
err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
ASSERT_VK_SUCCESS(err);
auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>();
ds_alloc_info.descriptorPool = pool;
ds_alloc_info.descriptorSetCount = 1;
ds_alloc_info.pSetLayouts = &ds_layout;
VkDescriptorSet ds = VK_NULL_HANDLE;
err = vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
ASSERT_VK_SUCCESS(err);
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 1024;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
VkBuffer dyub;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub);
ASSERT_VK_SUCCESS(err);
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), dyub, &mem_reqs);
VkMemoryAllocateInfo mem_alloc_info = {};
mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc_info.allocationSize = mem_reqs.size;
m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
err = vkAllocateMemory(m_device->device(), &mem_alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), dyub, mem, 0);
ASSERT_VK_SUCCESS(err);
VkDescriptorBufferInfo buffInfo[2] = {};
buffInfo[0].buffer = dyub;
buffInfo[0].offset = 0;
buffInfo[0].range = 1024;
VkWriteDescriptorSet descriptor_write[2] = {};
descriptor_write[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write[0].dstSet = ds;
descriptor_write[0].dstBinding = 0;
descriptor_write[0].descriptorCount = 1;
descriptor_write[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write[0].pBufferInfo = buffInfo;
descriptor_write[1] = descriptor_write[0];
descriptor_write[1].dstBinding = 1;
descriptor_write[1].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
VkPipelineLayout pipeline_layout;
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.setLayoutCount = 1;
pipeline_layout_ci.pSetLayouts = &ds_layout;
err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
ASSERT_VK_SUCCESS(err);
// Create a dummy pipeline, since VL inspects which bindings are actually used at draw time
char const *vsSource =
"#version 450\n"
"void main(){\n"
" gl_Position = vec4(0);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"layout(set=0, binding=0) uniform foo0 { float x0; } bar0;\n"
"layout(set=0, binding=1) buffer foo1 { float x1; } bar1;\n"
"void main(){\n"
" color = vec4(bar0.x0 + bar1.x1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.SetViewport(m_viewports);
pipe.SetScissor(m_scissors);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.CreateVKPipeline(pipeline_layout, m_renderPass);
// Make both bindings valid before binding to the command buffer
vkUpdateDescriptorSets(m_device->device(), 2, &descriptor_write[0], 0, NULL);
m_errorMonitor->VerifyNotFound();
// Two subtests. First only updates the update_after_bind binding and expects
// no error. Second updates the other binding and expects an error when the
// command buffer is ended.
for (uint32_t i = 0; i < 2; ++i) {
m_commandBuffer->begin();
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &ds, 0, NULL);
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
vkCmdDraw(m_commandBuffer->handle(), 0, 0, 0, 0);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyNotFound();
// Valid to update binding 1 after being bound
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[1], 0, NULL);
m_errorMonitor->VerifyNotFound();
if (i == 0) {
// expect no errors
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
} else {
// Invalid to update binding 0 after being bound. But the error is actually
// generated during vkEndCommandBuffer
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write[0], 0, NULL);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "is invalid because bound DescriptorSet");
vkEndCommandBuffer(m_commandBuffer->handle());
m_errorMonitor->VerifyFound();
}
}
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
vkDestroyDescriptorPool(m_device->handle(), pool, nullptr);
vkDestroyBuffer(m_device->handle(), dyub, NULL);
vkFreeMemory(m_device->handle(), mem, NULL);
vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, NULL);
}
TEST_F(VkLayerTest, AllocatePushDescriptorSet) {
TEST_DESCRIPTION("Attempt to allocate a push descriptor set.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
auto ds_layout_ci = lvl_init_struct<VkDescriptorSetLayoutCreateInfo>();
ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
ds_layout_ci.bindingCount = 1;
ds_layout_ci.pBindings = &binding;
VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
VkResult err = vkCreateDescriptorSetLayout(m_device->handle(), &ds_layout_ci, nullptr, &ds_layout);
ASSERT_VK_SUCCESS(err);
VkDescriptorPoolSize pool_size = {binding.descriptorType, binding.descriptorCount};
auto dspci = lvl_init_struct<VkDescriptorPoolCreateInfo>();
dspci.poolSizeCount = 1;
dspci.pPoolSizes = &pool_size;
dspci.maxSets = 1;
VkDescriptorPool pool;
err = vkCreateDescriptorPool(m_device->handle(), &dspci, nullptr, &pool);
ASSERT_VK_SUCCESS(err);
auto ds_alloc_info = lvl_init_struct<VkDescriptorSetAllocateInfo>();
ds_alloc_info.descriptorPool = pool;
ds_alloc_info.descriptorSetCount = 1;
ds_alloc_info.pSetLayouts = &ds_layout;
VkDescriptorSet ds = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308");
vkAllocateDescriptorSets(m_device->handle(), &ds_alloc_info, &ds);
m_errorMonitor->VerifyFound();
vkDestroyDescriptorPool(m_device->handle(), pool, nullptr);
vkDestroyDescriptorSetLayout(m_device->handle(), ds_layout, nullptr);
}
TEST_F(VkLayerTest, PushDescriptorSetCmdPushBadArgs) {
TEST_DESCRIPTION("Attempt to push a push descriptor set with incorrect arguments.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Create ordinary and push descriptor set layout
VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
const VkDescriptorSetLayoutObj ds_layout(m_device, {binding});
ASSERT_TRUE(ds_layout.initialized());
const VkDescriptorSetLayoutObj push_ds_layout(m_device, {binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
ASSERT_TRUE(push_ds_layout.initialized());
// Now use the descriptor set layouts to create a pipeline layout
const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout});
ASSERT_TRUE(pipeline_layout.initialized());
// Create a descriptor to push
const uint32_t buffer_data[4] = {4, 5, 6, 7};
VkConstantBufferObj buffer_obj(m_device, sizeof(buffer_data), &buffer_data);
ASSERT_TRUE(buffer_obj.initialized());
// Create a "write" struct, noting that the buffer_info cannot be a temporary arg (the return from write_descriptor_set
// references its data), and the DescriptorSet() can be temporary, because the value is ignored
VkDescriptorBufferInfo buffer_info = {buffer_obj.handle(), 0, VK_WHOLE_SIZE};
VkWriteDescriptorSet descriptor_write = vk_testing::Device::write_descriptor_set(
vk_testing::DescriptorSet(), 0, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, &buffer_info);
// Find address of extension call and make the call
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
(PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
ASSERT_TRUE(vkCmdPushDescriptorSetKHR != nullptr);
// Section 1: Queue family matching/capabilities.
// Create command pool on a non-graphics queue
const uint32_t no_gfx_qfi = m_device->QueueFamilyMatching(VK_QUEUE_COMPUTE_BIT, VK_QUEUE_GRAPHICS_BIT);
const uint32_t transfer_only_qfi =
m_device->QueueFamilyMatching(VK_QUEUE_TRANSFER_BIT, (VK_QUEUE_COMPUTE_BIT | VK_QUEUE_GRAPHICS_BIT));
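// A compute-only queue fails only the graphics bind point check; a transfer-only queue additionally fails the
// command pool capability check, so pushing on it is expected to generate both errors.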
if ((UINT32_MAX == transfer_only_qfi) && (UINT32_MAX == no_gfx_qfi)) {
printf("%s No compute or transfer only queue family, skipping bindpoint and queue tests.", kSkipPrefix);
} else {
const uint32_t err_qfi = (UINT32_MAX == no_gfx_qfi) ? transfer_only_qfi : no_gfx_qfi;
VkCommandPoolObj command_pool(m_device, err_qfi);
ASSERT_TRUE(command_pool.initialized());
VkCommandBufferObj command_buffer(m_device, &command_pool);
ASSERT_TRUE(command_buffer.initialized());
command_buffer.begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
if (err_qfi == transfer_only_qfi) {
// As this queue supports neither the graphics nor the compute bind point, we'll get two errors
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
}
vkCmdPushDescriptorSetKHR(command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write);
m_errorMonitor->VerifyFound();
command_buffer.end();
// If we succeed in testing only one condition above, we need to test the other below.
if ((UINT32_MAX != transfer_only_qfi) && (err_qfi != transfer_only_qfi)) {
// Need to test the neither compute/gfx supported case separately.
VkCommandPoolObj tran_command_pool(m_device, transfer_only_qfi);
ASSERT_TRUE(tran_command_pool.initialized());
VkCommandBufferObj tran_command_buffer(m_device, &tran_command_pool);
ASSERT_TRUE(tran_command_buffer.initialized());
tran_command_buffer.begin();
// We can't avoid getting *both* errors in this case
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdPushDescriptorSetKHR-pipelineBindPoint-00363");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkCmdPushDescriptorSetKHR-commandBuffer-cmdpool");
vkCmdPushDescriptorSetKHR(tran_command_buffer.handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write);
m_errorMonitor->VerifyFound();
tran_command_buffer.end();
}
}
// Push to the non-push binding
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00365");
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1,
&descriptor_write);
m_errorMonitor->VerifyFound();
// Specify set out of bounds
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdPushDescriptorSetKHR-set-00364");
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 2, 1,
&descriptor_write);
m_errorMonitor->VerifyFound();
m_commandBuffer->end();
// This is a test for VUID-vkCmdPushDescriptorSetKHR-commandBuffer-recording
// TODO: Add VALIDATION_ERROR_ code support to core_validation::ValidateCmd
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"You must call vkBeginCommandBuffer() before this call to vkCmdPushDescriptorSetKHR()");
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, SetDynScissorParamTests) {
TEST_DESCRIPTION("Test parameters of vkCmdSetScissor without multiViewport feature");
VkPhysicalDeviceFeatures features{};
ASSERT_NO_FATAL_FAILURE(Init(&features));
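// A zero-initialized VkPhysicalDeviceFeatures disables every optional feature, including multiViewport,
// so the single-scissor restrictions below apply.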
const VkRect2D scissor = {{0, 0}, {16, 16}};
const VkRect2D scissors[] = {scissor, scissor};
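// Without multiViewport, firstScissor must be 0 and scissorCount must be 1; scissorCount must also be
// non-zero and pScissors must be a valid pointer regardless of the feature.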
m_commandBuffer->begin();
// array tests
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593");
vkCmdSetScissor(m_commandBuffer->handle(), 1, 1, scissors);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength");
vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594");
vkCmdSetScissor(m_commandBuffer->handle(), 0, 2, scissors);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength");
vkCmdSetScissor(m_commandBuffer->handle(), 1, 0, scissors);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00593");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-00594");
vkCmdSetScissor(m_commandBuffer->handle(), 1, 2, scissors);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter");
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, nullptr);
m_errorMonitor->VerifyFound();
struct TestCase {
VkRect2D scissor;
std::string vuid;
};
std::vector<TestCase> test_cases = {{{{-1, 0}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"},
{{{0, -1}, {16, 16}}, "VUID-vkCmdSetScissor-x-00595"},
{{{1, 0}, {INT32_MAX, 16}}, "VUID-vkCmdSetScissor-offset-00596"},
{{{INT32_MAX, 0}, {1, 16}}, "VUID-vkCmdSetScissor-offset-00596"},
{{{0, 0}, {uint32_t{INT32_MAX} + 1, 16}}, "VUID-vkCmdSetScissor-offset-00596"},
{{{0, 1}, {16, INT32_MAX}}, "VUID-vkCmdSetScissor-offset-00597"},
{{{0, INT32_MAX}, {16, 1}}, "VUID-vkCmdSetScissor-offset-00597"},
{{{0, 0}, {16, uint32_t{INT32_MAX} + 1}}, "VUID-vkCmdSetScissor-offset-00597"}};
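// Scissor offsets must be non-negative, and offset + extent must not overflow a signed 32-bit integer.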
for (const auto &test_case : test_cases) {
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, test_case.vuid);
vkCmdSetScissor(m_commandBuffer->handle(), 0, 1, &test_case.scissor);
m_errorMonitor->VerifyFound();
}
m_commandBuffer->end();
}
TEST_F(VkLayerTest, SetDynScissorParamMultiviewportTests) {
TEST_DESCRIPTION("Test parameters of vkCmdSetScissor with multiViewport feature enabled");
ASSERT_NO_FATAL_FAILURE(Init());
if (!m_device->phy().features().multiViewport) {
printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix);
return;
}
const auto max_scissors = m_device->props.limits.maxViewports;
const uint32_t too_many_scissors = 65536 + 1; // let's say this is too much to allocate pScissors for
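// Even with multiViewport enabled, firstScissor + scissorCount must stay within
// VkPhysicalDeviceLimits::maxViewports.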
m_commandBuffer->begin();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength");
vkCmdSetScissor(m_commandBuffer->handle(), 0, 0, nullptr);
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-pScissors-parameter");
vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors, nullptr);
m_errorMonitor->VerifyFound();
if (max_scissors >= too_many_scissors) {
printf("%s VkPhysicalDeviceLimits::maxViewports is too large to practically test against -- skipping part of test.\n",
       kSkipPrefix);
return;
}
const VkRect2D scissor = {{0, 0}, {16, 16}};
const std::vector<VkRect2D> scissors(max_scissors + 1, scissor);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592");
vkCmdSetScissor(m_commandBuffer->handle(), 0, max_scissors + 1, scissors.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592");
vkCmdSetScissor(m_commandBuffer->handle(), max_scissors, 1, scissors.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592");
vkCmdSetScissor(m_commandBuffer->handle(), 1, max_scissors, scissors.data());
m_errorMonitor->VerifyFound();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-scissorCount-arraylength");
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdSetScissor-firstScissor-00592");
vkCmdSetScissor(m_commandBuffer->handle(), max_scissors + 1, 0, scissors.data());
m_errorMonitor->VerifyFound();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, EmptyDescriptorUpdateTest) {
TEST_DESCRIPTION("Update last descriptor in a set that includes an empty binding");
VkResult err;
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->ExpectSuccess();
// Create layout with two uniform buffer descriptors w/ empty binding between them
OneOffDescriptorSet ds(m_device, {
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
{1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0 /*!*/, 0, nullptr},
{2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
});
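// Binding 1 has descriptorCount 0, which is legal: it reserves the binding number but holds no descriptors,
// so only bindings 0 and 2 can be written.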
// Create a buffer to be used for update
VkBufferCreateInfo buff_ci = {};
buff_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_ci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buff_ci.size = 256;
buff_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buff_ci, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
// Have to bind memory to buffer before descriptor update
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 512;  // more than enough for the 256-byte buffer
mem_alloc.memoryTypeIndex = 0;
VkMemoryRequirements mem_reqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
// Make sure allocation is sufficiently large to accommodate buffer requirements
if (mem_reqs.size > mem_alloc.allocationSize) {
mem_alloc.allocationSize = mem_reqs.size;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
// Only update the descriptor at binding 2
VkDescriptorBufferInfo buff_info = {};
buff_info.buffer = buffer;
buff_info.offset = 0;
buff_info.range = VK_WHOLE_SIZE;
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 2;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = nullptr;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = nullptr;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.dstSet = ds.set_;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_errorMonitor->VerifyNotFound();
// Cleanup
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
}
TEST_F(VkLayerTest, MultiplePushDescriptorSets) {
TEST_DESCRIPTION("Verify an error message for multiple push descriptor sets.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const unsigned int descriptor_set_layout_count = 2;
std::vector<VkDescriptorSetLayoutObj> ds_layouts;
for (uint32_t i = 0; i < descriptor_set_layout_count; ++i) {
dsl_binding.binding = i;
ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding),
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
}
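// A pipeline layout may reference at most one push descriptor set layout, so supplying two must trigger
// VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293.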
const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts);
VkPipelineLayout pipeline_layout;
VkPipelineLayoutCreateInfo pipeline_layout_ci = {};
pipeline_layout_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
pipeline_layout_ci.pNext = NULL;
pipeline_layout_ci.pushConstantRangeCount = 0;
pipeline_layout_ci.pPushConstantRanges = NULL;
pipeline_layout_ci.setLayoutCount = ds_vk_layouts.size();
pipeline_layout_ci.pSetLayouts = ds_vk_layouts.data();
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293");
vkCreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, CreateDescriptorUpdateTemplate) {
TEST_DESCRIPTION("Verify error messages for invalid vkCreateDescriptorUpdateTemplate calls.");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME) &&
DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME);
} else {
printf("%s Push Descriptors and Descriptor Update Template Extensions not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 0;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_ALL;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout_ub(m_device, {dsl_binding});
const VkDescriptorSetLayoutObj ds_layout_ub1(m_device, {dsl_binding});
const VkDescriptorSetLayoutObj ds_layout_ub_push(m_device, {dsl_binding},
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout_ub, &ds_layout_ub1, &ds_layout_ub_push});
PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR =
(PFN_vkCreateDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkCreateDescriptorUpdateTemplateKHR");
ASSERT_NE(vkCreateDescriptorUpdateTemplateKHR, nullptr);
PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR =
(PFN_vkDestroyDescriptorUpdateTemplateKHR)vkGetDeviceProcAddr(m_device->device(), "vkDestroyDescriptorUpdateTemplateKHR");
ASSERT_NE(vkDestroyDescriptorUpdateTemplateKHR, nullptr);
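// VkDescriptorUpdateTemplateEntry fields, in order: dstBinding, dstArrayElement, descriptorCount,
// descriptorType, offset, stride.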
VkDescriptorUpdateTemplateEntry entries = {0, 0, 1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 0, sizeof(VkBuffer)};
VkDescriptorUpdateTemplateCreateInfo create_info = {};
create_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO;
create_info.pNext = nullptr;
create_info.flags = 0;
create_info.descriptorUpdateEntryCount = 1;
create_info.pDescriptorUpdateEntries = &entries;
auto do_test = [&](std::string err) {
VkDescriptorUpdateTemplateKHR dut = VK_NULL_HANDLE;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, err);
if (VK_SUCCESS == vkCreateDescriptorUpdateTemplateKHR(m_device->handle(), &create_info, nullptr, &dut)) {
vkDestroyDescriptorUpdateTemplateKHR(m_device->handle(), dut, nullptr);
}
m_errorMonitor->VerifyFound();
};
// Descriptor set type template
create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET;
// descriptorSetLayout is NULL
do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00350");
// Push descriptor type template
create_info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR;
create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
create_info.pipelineLayout = pipeline_layout.handle();
create_info.set = 2;
// Bad bind point -- deliberately overwrite it with an out-of-range value
memset(&create_info.pipelineBindPoint, 0xFE, sizeof(create_info.pipelineBindPoint));
do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00351");
create_info.pipelineBindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
// Bad pipeline layout
create_info.pipelineLayout = VK_NULL_HANDLE;
do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00352");
create_info.pipelineLayout = pipeline_layout.handle();
// Wrong set # -- set 0 is not the push descriptor set layout in this pipeline layout
create_info.set = 0;
do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353");
// Invalid set # -- out of range of the pipeline layout
create_info.set = 42;
do_test("VUID-VkDescriptorUpdateTemplateCreateInfo-templateType-00353");
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, PushDescriptorNullDstSetTest) {
TEST_DESCRIPTION("Use null dstSet in CmdPushDescriptorSetKHR");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess();
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 2;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
// Now use the descriptor layout to create a pipeline layout
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds_layout});
static const float vbo_data[3] = {1.f, 0.f, 1.f};
VkConstantBufferObj vbo(m_device, sizeof(vbo_data), (const void *)&vbo_data);
VkDescriptorBufferInfo buff_info;
buff_info.buffer = vbo.handle();
buff_info.offset = 0;
buff_info.range = sizeof(vbo_data);
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 2;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = nullptr;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = nullptr;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.dstSet = 0; // Should not cause a validation error
// Find address of extension call and make the call
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
(PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
assert(vkCmdPushDescriptorSetKHR != nullptr);
m_commandBuffer->begin();
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, PushDescriptorUnboundSetTest) {
TEST_DESCRIPTION("Ensure that no validation errors are produced for not bound push descriptor sets");
VkResult err;
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
} else {
printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_errorMonitor->ExpectSuccess();
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
// Create descriptor set layout
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 2;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
// Create push descriptor set layout
const VkDescriptorSetLayoutObj push_ds_layout(m_device, {dsl_binding}, VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
// Allocate descriptor set
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.descriptorPool = ds_pool;
alloc_info.descriptorSetCount = 1;
alloc_info.pSetLayouts = &ds_layout.handle();
VkDescriptorSet descriptor_set;
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_set);
ASSERT_VK_SUCCESS(err);
// Now use the descriptor layouts to create a pipeline layout
const VkPipelineLayoutObj pipeline_layout(m_device, {&push_ds_layout, &ds_layout});
// Create PSO
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=2) uniform foo1 { float x; } bar1;\n"
"layout(set=1) layout(binding=2) uniform foo2 { float y; } bar2;\n"
"void main(){\n"
" x = vec4(bar1.x) + vec4(bar2.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.SetViewport(m_viewports);
pipe.SetScissor(m_scissors);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
static const float bo_data[1] = {1.f};
VkConstantBufferObj buffer(m_device, sizeof(bo_data), (const void *)&bo_data, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
// Update descriptor set
VkDescriptorBufferInfo buff_info;
buff_info.buffer = buffer.handle();
buff_info.offset = 0;
buff_info.range = sizeof(bo_data);
VkWriteDescriptorSet descriptor_write = {};
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstBinding = 2;
descriptor_write.descriptorCount = 1;
descriptor_write.pTexelBufferView = nullptr;
descriptor_write.pBufferInfo = &buff_info;
descriptor_write.pImageInfo = nullptr;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptor_write.dstSet = descriptor_set;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR =
(PFN_vkCmdPushDescriptorSetKHR)vkGetDeviceProcAddr(m_device->device(), "vkCmdPushDescriptorSetKHR");
assert(vkCmdPushDescriptorSetKHR != nullptr);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
// Push descriptors and bind descriptor set
vkCmdPushDescriptorSetKHR(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1,
&descriptor_write);
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 1, 1,
&descriptor_set, 0, NULL);
// No errors should be generated.
vkCmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
}
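// A minimal sketch (not part of the original tests) of what the framework's VkDescriptorSetLayoutObj
// does when given VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR: a push-descriptor set
// layout is an ordinary descriptor set layout created with that flag. Such a layout can only be
// fed to vkCmdPushDescriptorSetKHR; it must not be used to allocate descriptor sets from a pool.
static VkResult SketchCreatePushDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutBinding &binding,
                                                    VkDescriptorSetLayout *out_layout) {
    VkDescriptorSetLayoutCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
    ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR;
    ci.bindingCount = 1;
    ci.pBindings = &binding;
    return vkCreateDescriptorSetLayout(device, &ci, nullptr, out_layout);
}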
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, TestAliasedMemoryTracking) {
VkResult err;
bool pass;
TEST_DESCRIPTION(
"Create a buffer, allocate memory, bind memory, destroy the buffer, create an image, and bind the same memory to it");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkBuffer buffer;
VkImage image;
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
VkBufferCreateInfo buf_info = {};
buf_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buf_info.pNext = NULL;
buf_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buf_info.size = 256;
buf_info.queueFamilyIndexCount = 0;
buf_info.pQueueFamilyIndices = NULL;
buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buf_info.flags = 0;
err = vkCreateBuffer(m_device->device(), &buf_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
vkGetBufferMemoryRequirements(m_device->device(), buffer, &mem_reqs);
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
// Ensure memory is big enough for both bindings
alloc_info.allocationSize = 0x10000;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
uint8_t *pData;
err = vkMapMemory(m_device->device(), mem, 0, mem_reqs.size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
    memset(pData, 0xCADECADE, static_cast<size_t>(mem_reqs.size));  // memset only uses the low byte (0xDE)
vkUnmapMemory(m_device->device(), mem);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
    // Now destroy the buffer. The resource no longer occupies this memory;
    // in fact, it was never used by the GPU.
    // Just to be sure, wait for idle.
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkDeviceWaitIdle(m_device->device());
// Use optimal as some platforms report linear support but then fail image creation
VkImageTiling image_tiling = VK_IMAGE_TILING_OPTIMAL;
VkImageFormatProperties image_format_properties;
vkGetPhysicalDeviceImageFormatProperties(gpu(), VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_TYPE_2D, image_tiling,
VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0, &image_format_properties);
if (image_format_properties.maxExtent.width == 0) {
printf("%s Image format not supported; skipped.\n", kSkipPrefix);
vkFreeMemory(m_device->device(), mem, NULL);
return;
}
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = image_tiling;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.queueFamilyIndexCount = 0;
image_create_info.pQueueFamilyIndices = NULL;
image_create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
image_create_info.flags = 0;
    // Create an image and bind it to the same memory that previously backed the buffer.
err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &mem_reqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = 0;
mem_alloc.memoryTypeIndex = 0;
mem_alloc.allocationSize = mem_reqs.size;
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &mem_alloc, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyImage(m_device->device(), image, NULL);
return;
}
    // Bind the image to the memory that previously backed the (now destroyed) buffer; no validation errors are expected.
err = vkBindImageMemory(m_device->device(), image, mem, 0);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
vkFreeMemory(m_device->device(), mem, NULL);
vkDestroyImage(m_device->device(), image, NULL);
}
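// A minimal sketch (not part of the original tests) of the memory-type selection that
// m_device->phy().set_memory_type() performs above: pick the first memory type that is allowed
// by the resource's memoryTypeBits and has all of the requested property flags.
static uint32_t SketchFindMemoryTypeIndex(VkPhysicalDevice gpu, uint32_t type_bits, VkMemoryPropertyFlags required) {
    VkPhysicalDeviceMemoryProperties props;
    vkGetPhysicalDeviceMemoryProperties(gpu, &props);
    for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
        const bool allowed = (type_bits & (1u << i)) != 0;
        const bool has_required = (props.memoryTypes[i].propertyFlags & required) == required;
        if (allowed && has_required) return i;
    }
    return UINT32_MAX;  // no suitable memory type
}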
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, TestDestroyFreeNullHandles) {
VkResult err;
TEST_DESCRIPTION("Call all applicable destroy and free routines with NULL handles, expecting no validation errors");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
vkDestroyBuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyBufferView(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyCommandPool(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyDescriptorPool(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyDescriptorSetLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyDevice(VK_NULL_HANDLE, NULL);
vkDestroyEvent(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyFramebuffer(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyImage(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyImageView(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyInstance(VK_NULL_HANDLE, NULL);
vkDestroyPipeline(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyPipelineCache(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyPipelineLayout(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyQueryPool(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyRenderPass(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroySampler(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroySemaphore(m_device->device(), VK_NULL_HANDLE, NULL);
vkDestroyShaderModule(m_device->device(), VK_NULL_HANDLE, NULL);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffers[3] = {};
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffers[1]);
vkFreeCommandBuffers(m_device->device(), command_pool, 3, command_buffers);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
VkDescriptorPoolSize ds_type_count = {};
ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
ds_type_count.descriptorCount = 1;
VkDescriptorPoolCreateInfo ds_pool_ci = {};
ds_pool_ci.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
ds_pool_ci.pNext = NULL;
ds_pool_ci.maxSets = 1;
ds_pool_ci.poolSizeCount = 1;
ds_pool_ci.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
ds_pool_ci.pPoolSizes = &ds_type_count;
VkDescriptorPool ds_pool;
err = vkCreateDescriptorPool(m_device->device(), &ds_pool_ci, NULL, &ds_pool);
ASSERT_VK_SUCCESS(err);
VkDescriptorSetLayoutBinding dsl_binding = {};
dsl_binding.binding = 2;
dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
dsl_binding.descriptorCount = 1;
dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
dsl_binding.pImmutableSamplers = NULL;
const VkDescriptorSetLayoutObj ds_layout(m_device, {dsl_binding});
VkDescriptorSet descriptor_sets[3] = {};
VkDescriptorSetAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc_info.descriptorSetCount = 1;
alloc_info.descriptorPool = ds_pool;
alloc_info.pSetLayouts = &ds_layout.handle();
err = vkAllocateDescriptorSets(m_device->device(), &alloc_info, &descriptor_sets[1]);
ASSERT_VK_SUCCESS(err);
vkFreeDescriptorSets(m_device->device(), ds_pool, 3, descriptor_sets);
vkDestroyDescriptorPool(m_device->device(), ds_pool, NULL);
vkFreeMemory(m_device->device(), VK_NULL_HANDLE, NULL);
m_errorMonitor->VerifyNotFound();
}
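// A minimal sketch (not part of the original tests) of why the no-op behavior verified above is
// useful: destroy/free entry points accept VK_NULL_HANDLE for the object being destroyed, so a
// simple RAII wrapper can destroy unconditionally without tracking whether creation ever happened.
struct SketchScopedBuffer {
    VkDevice device = VK_NULL_HANDLE;
    VkBuffer buffer = VK_NULL_HANDLE;  // stays VK_NULL_HANDLE if creation was skipped or failed
    ~SketchScopedBuffer() {
        if (device != VK_NULL_HANDLE) vkDestroyBuffer(device, buffer, nullptr);  // no-op when buffer is VK_NULL_HANDLE
    }
};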
TEST_F(VkPositiveLayerTest, QueueSubmitSemaphoresAndLayoutTracking) {
TEST_DESCRIPTION("Submit multiple command buffers with chained semaphore signals and layout transitions");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkCommandBuffer cmd_bufs[4];
VkCommandBufferAllocateInfo alloc_info;
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.commandBufferCount = 4;
alloc_info.commandPool = m_commandPool->handle();
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &alloc_info, cmd_bufs);
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM,
(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkCommandBufferBeginInfo cb_binfo;
cb_binfo.pNext = NULL;
cb_binfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
cb_binfo.pInheritanceInfo = VK_NULL_HANDLE;
cb_binfo.flags = 0;
// Use 4 command buffers, each with an image layout transition, ColorAO->General->ColorAO->TransferSrc->TransferDst
vkBeginCommandBuffer(cmd_bufs[0], &cb_binfo);
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = NULL;
img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.image = image.handle();
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
vkCmdPipelineBarrier(cmd_bufs[0], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1,
&img_barrier);
vkEndCommandBuffer(cmd_bufs[0]);
vkBeginCommandBuffer(cmd_bufs[1], &cb_binfo);
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
vkCmdPipelineBarrier(cmd_bufs[1], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1,
&img_barrier);
vkEndCommandBuffer(cmd_bufs[1]);
vkBeginCommandBuffer(cmd_bufs[2], &cb_binfo);
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
vkCmdPipelineBarrier(cmd_bufs[2], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1,
&img_barrier);
vkEndCommandBuffer(cmd_bufs[2]);
vkBeginCommandBuffer(cmd_bufs[3], &cb_binfo);
img_barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
vkCmdPipelineBarrier(cmd_bufs[3], VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0, 0, nullptr, 0, nullptr, 1,
&img_barrier);
vkEndCommandBuffer(cmd_bufs[3]);
// Submit 4 command buffers in 3 submits, with submits 2 and 3 waiting for semaphores from submits 1 and 2
VkSemaphore semaphore1, semaphore2;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore1);
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore2);
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info[3];
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].pNext = nullptr;
submit_info[0].commandBufferCount = 1;
submit_info[0].pCommandBuffers = &cmd_bufs[0];
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore1;
submit_info[0].waitSemaphoreCount = 0;
    submit_info[0].pWaitSemaphores = nullptr;
    submit_info[0].pWaitDstStageMask = flags;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].pNext = nullptr;
submit_info[1].commandBufferCount = 1;
submit_info[1].pCommandBuffers = &cmd_bufs[1];
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore1;
submit_info[1].signalSemaphoreCount = 1;
submit_info[1].pSignalSemaphores = &semaphore2;
submit_info[1].pWaitDstStageMask = flags;
submit_info[2].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[2].pNext = nullptr;
submit_info[2].commandBufferCount = 2;
submit_info[2].pCommandBuffers = &cmd_bufs[2];
submit_info[2].waitSemaphoreCount = 1;
submit_info[2].pWaitSemaphores = &semaphore2;
submit_info[2].signalSemaphoreCount = 0;
submit_info[2].pSignalSemaphores = nullptr;
submit_info[2].pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 3, submit_info, VK_NULL_HANDLE);
vkQueueWaitIdle(m_device->m_queue);
vkDestroySemaphore(m_device->device(), semaphore1, NULL);
vkDestroySemaphore(m_device->device(), semaphore2, NULL);
m_errorMonitor->VerifyNotFound();
}
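// A minimal sketch (not part of the original tests) of the batch-chaining pattern used above,
// with every VkSubmitInfo field written explicitly. The test's submit_info array is not
// zero-initialized, which is why each member is assigned by hand there; this helper makes the
// same point in one place.
static VkSubmitInfo SketchChainedSubmit(const VkCommandBuffer *cmd_buf, const VkSemaphore *wait_sem,
                                        const VkPipelineStageFlags *wait_stage, const VkSemaphore *signal_sem) {
    VkSubmitInfo si = {};
    si.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    si.waitSemaphoreCount = wait_sem ? 1 : 0;
    si.pWaitSemaphores = wait_sem;
    si.pWaitDstStageMask = wait_sem ? wait_stage : nullptr;
    si.commandBufferCount = 1;
    si.pCommandBuffers = cmd_buf;
    si.signalSemaphoreCount = signal_sem ? 1 : 0;
    si.pSignalSemaphores = signal_sem;
    return si;
}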
TEST_F(VkPositiveLayerTest, DynamicOffsetWithInactiveBinding) {
    // Create a descriptor set with dynamic descriptors where one binding is inactive.
    // We previously had a bug where the dynamic offset of inactive bindings was still being used.
VkResult err;
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitViewport());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
OneOffDescriptorSet ds(m_device, {
{2, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr},
});
const VkPipelineLayoutObj pipeline_layout(m_device, {&ds.layout_});
// Create two buffers to update the descriptors with
// The first will be 2k and used for bindings 0 & 1, the second is 1k for binding 2
uint32_t qfi = 0;
VkBufferCreateInfo buffCI = {};
buffCI.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffCI.size = 2048;
buffCI.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffCI.queueFamilyIndexCount = 1;
buffCI.pQueueFamilyIndices = &qfi;
VkBuffer dyub1;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub1);
ASSERT_VK_SUCCESS(err);
// buffer2
buffCI.size = 1024;
VkBuffer dyub2;
err = vkCreateBuffer(m_device->device(), &buffCI, NULL, &dyub2);
ASSERT_VK_SUCCESS(err);
// Allocate memory and bind to buffers
VkMemoryAllocateInfo mem_alloc[2] = {};
mem_alloc[0].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc[0].pNext = NULL;
mem_alloc[0].memoryTypeIndex = 0;
mem_alloc[1].sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc[1].pNext = NULL;
mem_alloc[1].memoryTypeIndex = 0;
VkMemoryRequirements mem_reqs1;
vkGetBufferMemoryRequirements(m_device->device(), dyub1, &mem_reqs1);
VkMemoryRequirements mem_reqs2;
vkGetBufferMemoryRequirements(m_device->device(), dyub2, &mem_reqs2);
mem_alloc[0].allocationSize = mem_reqs1.size;
bool pass = m_device->phy().set_memory_type(mem_reqs1.memoryTypeBits, &mem_alloc[0], 0);
mem_alloc[1].allocationSize = mem_reqs2.size;
pass &= m_device->phy().set_memory_type(mem_reqs2.memoryTypeBits, &mem_alloc[1], 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), dyub1, NULL);
vkDestroyBuffer(m_device->device(), dyub2, NULL);
return;
}
VkDeviceMemory mem1;
err = vkAllocateMemory(m_device->device(), &mem_alloc[0], NULL, &mem1);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), dyub1, mem1, 0);
ASSERT_VK_SUCCESS(err);
VkDeviceMemory mem2;
err = vkAllocateMemory(m_device->device(), &mem_alloc[1], NULL, &mem2);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), dyub2, mem2, 0);
ASSERT_VK_SUCCESS(err);
// Update descriptors
const uint32_t BINDING_COUNT = 3;
VkDescriptorBufferInfo buff_info[BINDING_COUNT] = {};
buff_info[0].buffer = dyub1;
buff_info[0].offset = 0;
buff_info[0].range = 256;
buff_info[1].buffer = dyub1;
buff_info[1].offset = 256;
buff_info[1].range = 512;
buff_info[2].buffer = dyub2;
buff_info[2].offset = 0;
buff_info[2].range = 512;
VkWriteDescriptorSet descriptor_write;
memset(&descriptor_write, 0, sizeof(descriptor_write));
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = ds.set_;
descriptor_write.dstBinding = 0;
descriptor_write.descriptorCount = BINDING_COUNT;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
descriptor_write.pBufferInfo = buff_info;
vkUpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    // Create PSO to be used for the draw below
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 x;\n"
"layout(set=0) layout(binding=0) uniform foo1 { int x; int y; } bar1;\n"
"layout(set=0) layout(binding=2) uniform foo2 { int x; int y; } bar2;\n"
"void main(){\n"
" x = vec4(bar1.y) + vec4(bar2.y);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.SetViewport(m_viewports);
pipe.SetScissor(m_scissors);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
vkCmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());
    // This bind should succeed, even though the dynamic offset of inactive binding 1 oversteps the binding 2 buffer size;
    // we used to have a bug in this case.
uint32_t dyn_off[BINDING_COUNT] = {0, 1024, 256};
vkCmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout.handle(), 0, 1, &ds.set_,
BINDING_COUNT, dyn_off);
m_commandBuffer->Draw(1, 0, 0, 0);
m_errorMonitor->VerifyNotFound();
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
vkDestroyBuffer(m_device->device(), dyub1, NULL);
vkDestroyBuffer(m_device->device(), dyub2, NULL);
vkFreeMemory(m_device->device(), mem1, NULL);
vkFreeMemory(m_device->device(), mem2, NULL);
}
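// A minimal sketch (not part of the original tests) of how the dyn_off[] array above is consumed:
// vkCmdBindDescriptorSets expects one dynamic offset per dynamic descriptor, ordered by increasing
// binding number within the set (and by array element within a binding), whether or not the
// binding is statically used by the pipeline - which is why inactive binding 1 still needs an entry.
static void SketchBindWithDynamicOffsets(VkCommandBuffer cmd, VkPipelineLayout layout, VkDescriptorSet set,
                                         uint32_t dynamic_offset_count, const uint32_t *offsets_in_binding_order) {
    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0 /*firstSet*/, 1, &set, dynamic_offset_count,
                            offsets_in_binding_order);
}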
TEST_F(VkPositiveLayerTest, NonCoherentMemoryMapping) {
TEST_DESCRIPTION(
"Ensure that validations handling of non-coherent memory mapping while using VK_WHOLE_SIZE does not cause access "
"violations");
VkResult err;
uint8_t *pData;
ASSERT_NO_FATAL_FAILURE(Init());
VkDeviceMemory mem;
VkMemoryRequirements mem_reqs;
mem_reqs.memoryTypeBits = 0xFFFFFFFF;
const VkDeviceSize atom_size = m_device->props.limits.nonCoherentAtomSize;
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.memoryTypeIndex = 0;
static const VkDeviceSize allocation_size = 32 * atom_size;
alloc_info.allocationSize = allocation_size;
    // Find a memory configuration WITHOUT a COHERENT bit, otherwise exit
bool pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT,
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (!pass) {
pass = m_device->phy().set_memory_type(mem_reqs.memoryTypeBits, &alloc_info,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (!pass) {
pass = m_device->phy().set_memory_type(
mem_reqs.memoryTypeBits, &alloc_info,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if (!pass) {
printf("%s Couldn't find a memory type wihtout a COHERENT bit.\n", kSkipPrefix);
return;
}
}
}
err = vkAllocateMemory(m_device->device(), &alloc_info, NULL, &mem);
ASSERT_VK_SUCCESS(err);
// Map/Flush/Invalidate using WHOLE_SIZE and zero offsets and entire mapped range
m_errorMonitor->ExpectSuccess();
err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
VkMappedMemoryRange mmr = {};
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = 0;
mmr.size = VK_WHOLE_SIZE;
err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
vkUnmapMemory(m_device->device(), mem);
// Map/Flush/Invalidate using WHOLE_SIZE and an offset and entire mapped range
m_errorMonitor->ExpectSuccess();
err = vkMapMemory(m_device->device(), mem, 5 * atom_size, VK_WHOLE_SIZE, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = 6 * atom_size;
mmr.size = VK_WHOLE_SIZE;
err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
vkUnmapMemory(m_device->device(), mem);
// Map with offset and size
// Flush/Invalidate subrange of mapped area with offset and size
m_errorMonitor->ExpectSuccess();
err = vkMapMemory(m_device->device(), mem, 3 * atom_size, 9 * atom_size, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = 4 * atom_size;
mmr.size = 2 * atom_size;
err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
err = vkInvalidateMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
vkUnmapMemory(m_device->device(), mem);
// Map without offset and flush WHOLE_SIZE with two separate offsets
m_errorMonitor->ExpectSuccess();
err = vkMapMemory(m_device->device(), mem, 0, VK_WHOLE_SIZE, 0, (void **)&pData);
ASSERT_VK_SUCCESS(err);
mmr.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
mmr.memory = mem;
mmr.offset = allocation_size - (4 * atom_size);
mmr.size = VK_WHOLE_SIZE;
err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
mmr.offset = allocation_size - (6 * atom_size);
mmr.size = VK_WHOLE_SIZE;
err = vkFlushMappedMemoryRanges(m_device->device(), 1, &mmr);
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
vkUnmapMemory(m_device->device(), mem);
vkFreeMemory(m_device->device(), mem, NULL);
}
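// A minimal sketch (not part of the original tests) of the rule exercised above for non-coherent
// memory: VkMappedMemoryRange::offset must be a multiple of nonCoherentAtomSize, and size must
// either be a multiple of it or reach the end of the allocation (VK_WHOLE_SIZE always qualifies).
// This helper widens an arbitrary byte range to legal atom boundaries, clamped to the allocation.
static VkMappedMemoryRange SketchAlignedFlushRange(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                                   VkDeviceSize atom_size, VkDeviceSize allocation_size) {
    VkMappedMemoryRange range = {};
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = mem;
    range.offset = (offset / atom_size) * atom_size;  // round the start down to an atom boundary
    VkDeviceSize end = ((offset + size + atom_size - 1) / atom_size) * atom_size;  // round the end up
    if (end > allocation_size) end = allocation_size;  // but never past the end of the allocation
    range.size = end - range.offset;
    return range;
}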
// This is a positive test. We used to expect an error in this case, but the spec now allows it.
TEST_F(VkPositiveLayerTest, ResetUnsignaledFence) {
m_errorMonitor->ExpectSuccess();
vk_testing::Fence testFence;
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fenceInfo.pNext = NULL;
ASSERT_NO_FATAL_FAILURE(Init());
testFence.init(*m_device, fenceInfo);
VkFence fences[1] = {testFence.handle()};
VkResult result = vkResetFences(m_device->device(), 1, fences);
ASSERT_VK_SUCCESS(result);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CommandBufferSimultaneousUseSync) {
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkResult err;
// Record (empty!) command buffer that can be submitted multiple times
// simultaneously.
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT, nullptr};
m_commandBuffer->begin(&cbbi);
m_commandBuffer->end();
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
VkFence fence;
err = vkCreateFence(m_device->device(), &fci, nullptr, &fence);
ASSERT_VK_SUCCESS(err);
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
VkSemaphore s1, s2;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s1);
ASSERT_VK_SUCCESS(err);
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s2);
ASSERT_VK_SUCCESS(err);
// Submit CB once signaling s1, with fence so we can roll forward to its retirement.
VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &m_commandBuffer->handle(), 1, &s1};
err = vkQueueSubmit(m_device->m_queue, 1, &si, fence);
ASSERT_VK_SUCCESS(err);
// Submit CB again, signaling s2.
si.pSignalSemaphores = &s2;
err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
// Wait for fence.
err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
ASSERT_VK_SUCCESS(err);
    // The command buffer is still in flight from the second submission, but semaphore s1 is no
    // longer in flight. Delete it.
vkDestroySemaphore(m_device->device(), s1, nullptr);
m_errorMonitor->VerifyNotFound();
// Force device idle and clean up remaining objects
vkDeviceWaitIdle(m_device->device());
vkDestroySemaphore(m_device->device(), s2, nullptr);
vkDestroyFence(m_device->device(), fence, nullptr);
}
TEST_F(VkPositiveLayerTest, FenceCreateSignaledWaitHandling) {
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkResult err;
// A fence created signaled
VkFenceCreateInfo fci1 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, VK_FENCE_CREATE_SIGNALED_BIT};
VkFence f1;
err = vkCreateFence(m_device->device(), &fci1, nullptr, &f1);
ASSERT_VK_SUCCESS(err);
    // A fence created unsignaled
VkFenceCreateInfo fci2 = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
VkFence f2;
err = vkCreateFence(m_device->device(), &fci2, nullptr, &f2);
ASSERT_VK_SUCCESS(err);
// Submit the unsignaled fence
VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 0, nullptr, 0, nullptr};
err = vkQueueSubmit(m_device->m_queue, 1, &si, f2);
// Wait on both fences, with signaled first.
VkFence fences[] = {f1, f2};
vkWaitForFences(m_device->device(), 2, fences, VK_TRUE, UINT64_MAX);
// Should have both retired!
vkDestroyFence(m_device->device(), f1, nullptr);
vkDestroyFence(m_device->device(), f2, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreateImageViewFollowsParameterCompatibilityRequirements) {
TEST_DESCRIPTION("Verify that creating an ImageView with valid usage does not generate validation errors.");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->ExpectSuccess();
VkImageCreateInfo imgInfo = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
nullptr,
VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT,
VK_IMAGE_TYPE_2D,
VK_FORMAT_R8G8B8A8_UNORM,
{128, 128, 1},
1,
1,
VK_SAMPLE_COUNT_1_BIT,
VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
VK_SHARING_MODE_EXCLUSIVE,
0,
nullptr,
VK_IMAGE_LAYOUT_UNDEFINED};
VkImageObj image(m_device);
image.init(&imgInfo);
ASSERT_TRUE(image.initialized());
VkImageView imageView;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.baseArrayLayer = 0;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound();
vkDestroyImageView(m_device->device(), imageView, NULL);
}
TEST_F(VkPositiveLayerTest, ValidUsage) {
TEST_DESCRIPTION("Verify that creating an image view from an image with valid usage doesn't generate validation errors");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->ExpectSuccess();
// Verify that we can create a view with usage INPUT_ATTACHMENT
VkImageObj image(m_device);
image.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView imageView;
VkImageViewCreateInfo ivci = {};
ivci.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
ivci.image = image.handle();
ivci.viewType = VK_IMAGE_VIEW_TYPE_2D;
ivci.format = VK_FORMAT_R8G8B8A8_UNORM;
ivci.subresourceRange.layerCount = 1;
ivci.subresourceRange.baseMipLevel = 0;
ivci.subresourceRange.levelCount = 1;
ivci.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
vkCreateImageView(m_device->device(), &ivci, NULL, &imageView);
m_errorMonitor->VerifyNotFound();
vkDestroyImageView(m_device->device(), imageView, NULL);
}
// This is a positive test. No failures are expected.
TEST_F(VkPositiveLayerTest, BindSparse) {
TEST_DESCRIPTION("Bind 2 memory ranges to one image using vkQueueBindSparse, destroy the image and then free the memory");
ASSERT_NO_FATAL_FAILURE(Init());
auto index = m_device->graphics_queue_node_index_;
if (!(m_device->queue_props[index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT)) {
printf("%s Graphics queue does not have sparse binding bit.\n", kSkipPrefix);
return;
}
if (!m_device->phy().features().sparseBinding) {
printf("%s Device does not support sparse bindings.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkImage image;
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
image_create_info.pNext = NULL;
image_create_info.imageType = VK_IMAGE_TYPE_2D;
image_create_info.format = VK_FORMAT_B8G8R8A8_UNORM;
image_create_info.extent.width = 64;
image_create_info.extent.height = 64;
image_create_info.extent.depth = 1;
image_create_info.mipLevels = 1;
image_create_info.arrayLayers = 1;
image_create_info.samples = VK_SAMPLE_COUNT_1_BIT;
image_create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
image_create_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
image_create_info.flags = VK_IMAGE_CREATE_SPARSE_BINDING_BIT;
VkResult err = vkCreateImage(m_device->device(), &image_create_info, NULL, &image);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
VkDeviceMemory memory_one, memory_two;
bool pass;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = NULL;
memory_info.allocationSize = 0;
memory_info.memoryTypeIndex = 0;
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
// Find an image big enough to allow sparse mapping of 2 memory regions
// Increase the image size until it is at least twice the
// size of the required alignment, to ensure we can bind both
// allocated memory blocks to the image on aligned offsets.
while (memory_reqs.size < (memory_reqs.alignment * 2)) {
vkDestroyImage(m_device->device(), image, nullptr);
image_create_info.extent.width *= 2;
image_create_info.extent.height *= 2;
err = vkCreateImage(m_device->device(), &image_create_info, nullptr, &image);
ASSERT_VK_SUCCESS(err);
vkGetImageMemoryRequirements(m_device->device(), image, &memory_reqs);
}
// Allocate 2 memory regions of minimum alignment size, bind one at 0, the other
// at the end of the first
memory_info.allocationSize = memory_reqs.alignment;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_one);
ASSERT_VK_SUCCESS(err);
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &memory_two);
ASSERT_VK_SUCCESS(err);
VkSparseMemoryBind binds[2];
binds[0].flags = 0;
binds[0].memory = memory_one;
binds[0].memoryOffset = 0;
binds[0].resourceOffset = 0;
binds[0].size = memory_info.allocationSize;
binds[1].flags = 0;
binds[1].memory = memory_two;
binds[1].memoryOffset = 0;
binds[1].resourceOffset = memory_info.allocationSize;
binds[1].size = memory_info.allocationSize;
VkSparseImageOpaqueMemoryBindInfo opaqueBindInfo;
opaqueBindInfo.image = image;
opaqueBindInfo.bindCount = 2;
opaqueBindInfo.pBinds = binds;
VkFence fence = VK_NULL_HANDLE;
VkBindSparseInfo bindSparseInfo = {};
bindSparseInfo.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
bindSparseInfo.imageOpaqueBindCount = 1;
bindSparseInfo.pImageOpaqueBinds = &opaqueBindInfo;
vkQueueBindSparse(m_device->m_queue, 1, &bindSparseInfo, fence);
vkQueueWaitIdle(m_device->m_queue);
vkDestroyImage(m_device->device(), image, NULL);
vkFreeMemory(m_device->device(), memory_one, NULL);
vkFreeMemory(m_device->device(), memory_two, NULL);
m_errorMonitor->VerifyNotFound();
}
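// A minimal sketch (not part of the original tests) of the vkQueueBindSparse call used above,
// reduced to binding one opaque memory range at resource offset 0. Sparse binding is a queue
// operation, so the caller still needs a fence or vkQueueWaitIdle (as the test uses) before the
// image is destroyed or the memory freed.
static VkResult SketchBindOpaqueRange(VkQueue queue, VkImage image, VkDeviceMemory memory, VkDeviceSize size) {
    VkSparseMemoryBind bind = {};
    bind.resourceOffset = 0;
    bind.size = size;
    bind.memory = memory;
    bind.memoryOffset = 0;
    VkSparseImageOpaqueMemoryBindInfo opaque_bind = {};
    opaque_bind.image = image;
    opaque_bind.bindCount = 1;
    opaque_bind.pBinds = &bind;
    VkBindSparseInfo bind_info = {};
    bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
    bind_info.imageOpaqueBindCount = 1;
    bind_info.pImageOpaqueBinds = &opaque_bind;
    return vkQueueBindSparse(queue, 1, &bind_info, VK_NULL_HANDLE);
}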
TEST_F(VkPositiveLayerTest, RenderPassInitialLayoutUndefined) {
TEST_DESCRIPTION(
"Ensure that CmdBeginRenderPass with an attachment's initialLayout of VK_IMAGE_LAYOUT_UNDEFINED works when the command "
"buffer has prior knowledge of that attachment's layout.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with one color attachment.
VkAttachmentDescription attachment = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible framebuffer.
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image.handle(),
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_R8G8B8A8_UNORM,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Record a single command buffer which uses this renderpass twice. The
// bug is triggered at the beginning of the second renderpass, when the
// command buffer already has a layout recorded for the attachment.
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(m_commandBuffer->handle());
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyNotFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
vkDestroyImageView(m_device->device(), view, nullptr);
}
TEST_F(VkPositiveLayerTest, FramebufferBindingDestroyCommandPool) {
TEST_DESCRIPTION(
"This test should pass. Create a Framebuffer and command buffer, bind them together, then destroy command pool and "
"framebuffer and verify there are no errors.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with one color attachment.
VkAttachmentDescription attachment = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible framebuffer.
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
    // Explicitly create a command buffer to bind the FB to, so that we can then
    // destroy the command pool in order to implicitly free the command buffer
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);
// Begin our cmd buffer with renderpass using our framebuffer
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer, &begin_info);
vkCmdBeginRenderPass(command_buffer, &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(command_buffer);
vkEndCommandBuffer(command_buffer);
// Destroy command pool to implicitly free command buffer
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, RenderPassSubpassZeroTransitionsApplied) {
TEST_DESCRIPTION("Ensure that CmdBeginRenderPass applies the layout transitions for the first subpass");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with one color attachment.
VkAttachmentDescription attachment = {0,
VK_FORMAT_R8G8B8A8_UNORM,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep};
VkResult err;
VkRenderPass rp;
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible framebuffer.
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageView view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Record a single command buffer which issues a pipeline barrier w/
// image memory barrier for the attachment. This detects the previously
// missing tracking of the subpass layout by throwing a validation error
// if it doesn't occur.
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
VkImageMemoryBarrier imb = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
nullptr,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
image.handle(),
{VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1,
&imb);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyNotFound();
m_commandBuffer->end();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkPositiveLayerTest, DepthStencilLayoutTransitionForDepthOnlyImageview) {
TEST_DESCRIPTION(
"Validate that when an imageView of a depth/stencil image is used as a depth/stencil framebuffer attachment, the "
"aspectMask is ignored and both depth and stencil image subresources are used.");
ASSERT_NO_FATAL_FAILURE(Init());
VkFormatProperties format_properties;
vkGetPhysicalDeviceFormatProperties(gpu(), VK_FORMAT_D32_SFLOAT_S8_UINT, &format_properties);
if (!(format_properties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
printf("%s Image format does not support sampling.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkAttachmentDescription attachment = {0,
VK_FORMAT_D32_SFLOAT_S8_UINT,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr};
VkSubpassDependency dep = {0,
0,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
VK_DEPENDENCY_BY_REGION_BIT};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 1, &dep};
VkResult err;
VkRenderPass rp;
err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
VkImageObj image(m_device);
    image.InitNoLayout(32, 32, 1, VK_FORMAT_D32_SFLOAT_S8_UINT,
                       VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
                           VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,  // usage (0x26)
                       VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    image.SetLayout(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image.handle(),
VK_IMAGE_VIEW_TYPE_2D,
VK_FORMAT_D32_SFLOAT_S8_UINT,
{VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B, VK_COMPONENT_SWIZZLE_A},
        {VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1},  // depth-only aspectMask (0x2); both aspects still transition as a framebuffer attachment
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
m_commandBuffer->begin();
VkImageMemoryBarrier imb = {};
imb.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
imb.pNext = nullptr;
imb.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
imb.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
imb.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
imb.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imb.srcQueueFamilyIndex = 0;
imb.dstQueueFamilyIndex = 0;
imb.image = image.handle();
    imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;  // 0x6
imb.subresourceRange.baseMipLevel = 0;
imb.subresourceRange.levelCount = 0x1;
imb.subresourceRange.baseArrayLayer = 0;
imb.subresourceRange.layerCount = 0x1;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_DEPENDENCY_BY_REGION_BIT, 0, nullptr, 0, nullptr, 1, &imb);
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyNotFound();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
vkDestroyImageView(m_device->device(), view, nullptr);
}
TEST_F(VkPositiveLayerTest, RenderPassTransitionsAttachmentUnused) {
TEST_DESCRIPTION(
"Ensure that layout transitions work correctly without errors, when an attachment reference is VK_ATTACHMENT_UNUSED");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
// A renderpass with no attachments
VkAttachmentReference att_ref = {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &att_ref, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible framebuffer.
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 0, nullptr, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
// Record a command buffer which just begins and ends the renderpass. The
// bug manifests in BeginRenderPass.
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyNotFound();
m_commandBuffer->end();
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
// This is a positive test. No errors are expected.
TEST_F(VkPositiveLayerTest, StencilLoadOp) {
TEST_DESCRIPTION("Create a stencil-only attachment with a LOAD_OP set to CLEAR. stencil[Load|Store]Op used to be ignored.");
VkResult result = VK_SUCCESS;
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkImageFormatProperties formatProps;
vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0,
&formatProps);
if (formatProps.maxExtent.width < 100 || formatProps.maxExtent.height < 100) {
printf("%s Image format max extent is too small.\n", kSkipPrefix);
return;
}
VkFormat depth_stencil_fmt = depth_format;
m_depthStencil->Init(m_device, 100, 100, depth_stencil_fmt,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
VkAttachmentDescription att = {};
VkAttachmentReference ref = {};
att.format = depth_stencil_fmt;
att.samples = VK_SAMPLE_COUNT_1_BIT;
att.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
att.storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
att.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkClearValue clear;
clear.depthStencil.depth = 1.0;
clear.depthStencil.stencil = 0;
ref.attachment = 0;
ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.flags = 0;
subpass.inputAttachmentCount = 0;
subpass.pInputAttachments = NULL;
subpass.colorAttachmentCount = 0;
subpass.pColorAttachments = NULL;
subpass.pResolveAttachments = NULL;
subpass.pDepthStencilAttachment = &ref;
subpass.preserveAttachmentCount = 0;
subpass.pPreserveAttachments = NULL;
VkRenderPass rp;
VkRenderPassCreateInfo rp_info = {};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = 1;
rp_info.pAttachments = &att;
rp_info.subpassCount = 1;
rp_info.pSubpasses = &subpass;
result = vkCreateRenderPass(device(), &rp_info, NULL, &rp);
ASSERT_VK_SUCCESS(result);
VkImageView *depthView = m_depthStencil->BindInfo();
VkFramebufferCreateInfo fb_info = {};
fb_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
fb_info.pNext = NULL;
fb_info.renderPass = rp;
fb_info.attachmentCount = 1;
fb_info.pAttachments = depthView;
fb_info.width = 100;
fb_info.height = 100;
fb_info.layers = 1;
VkFramebuffer fb;
result = vkCreateFramebuffer(device(), &fb_info, NULL, &fb);
ASSERT_VK_SUCCESS(result);
VkRenderPassBeginInfo rpbinfo = {};
rpbinfo.clearValueCount = 1;
rpbinfo.pClearValues = &clear;
rpbinfo.pNext = NULL;
rpbinfo.renderPass = rp;
rpbinfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
rpbinfo.renderArea.extent.width = 100;
rpbinfo.renderArea.extent.height = 100;
rpbinfo.renderArea.offset.x = 0;
rpbinfo.renderArea.offset.y = 0;
rpbinfo.framebuffer = fb;
VkFenceObj fence;
fence.init(*m_device, VkFenceObj::create_info());
ASSERT_TRUE(fence.initialized());
m_commandBuffer->begin();
m_commandBuffer->BeginRenderPass(rpbinfo);
m_commandBuffer->EndRenderPass();
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(fence);
VkImageObj destImage(m_device);
destImage.Init(100, 100, 1, depth_stencil_fmt, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
VK_IMAGE_TILING_OPTIMAL, 0);
VkImageMemoryBarrier barrier = {};
VkImageSubresourceRange range;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
barrier.oldLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
barrier.image = m_depthStencil->handle();
range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
range.baseMipLevel = 0;
range.levelCount = 1;
range.baseArrayLayer = 0;
range.layerCount = 1;
barrier.subresourceRange = range;
fence.wait(VK_TRUE, UINT64_MAX);
VkCommandBufferObj cmdbuf(m_device, m_commandPool);
cmdbuf.begin();
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
barrier.srcAccessMask = 0;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.image = destImage.handle();
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, nullptr, 0, nullptr, 1,
&barrier);
VkImageCopy cregion;
cregion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
cregion.srcSubresource.mipLevel = 0;
cregion.srcSubresource.baseArrayLayer = 0;
cregion.srcSubresource.layerCount = 1;
cregion.srcOffset.x = 0;
cregion.srcOffset.y = 0;
cregion.srcOffset.z = 0;
cregion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
cregion.dstSubresource.mipLevel = 0;
cregion.dstSubresource.baseArrayLayer = 0;
cregion.dstSubresource.layerCount = 1;
cregion.dstOffset.x = 0;
cregion.dstOffset.y = 0;
cregion.dstOffset.z = 0;
cregion.extent.width = 100;
cregion.extent.height = 100;
cregion.extent.depth = 1;
cmdbuf.CopyImage(m_depthStencil->handle(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, destImage.handle(),
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &cregion);
cmdbuf.end();
VkSubmitInfo submit_info;
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmdbuf.handle();
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
m_errorMonitor->ExpectSuccess();
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
m_errorMonitor->VerifyNotFound();
vkQueueWaitIdle(m_device->m_queue);
vkDestroyRenderPass(m_device->device(), rp, nullptr);
vkDestroyFramebuffer(m_device->device(), fb, nullptr);
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, BarrierLayoutToImageUsage) {
TEST_DESCRIPTION("Ensure barriers' new and old VkImageLayout are compatible with their images' VkImageUsageFlags");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageMemoryBarrier img_barrier = {};
img_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
img_barrier.pNext = NULL;
img_barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
img_barrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.subresourceRange.baseArrayLayer = 0;
img_barrier.subresourceRange.baseMipLevel = 0;
img_barrier.subresourceRange.layerCount = 1;
img_barrier.subresourceRange.levelCount = 1;
{
VkImageObj img_color(m_device);
img_color.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_color.initialized());
VkImageObj img_ds1(m_device);
img_ds1.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_ds1.initialized());
VkImageObj img_ds2(m_device);
img_ds2.Init(128, 128, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_ds2.initialized());
VkImageObj img_xfer_src(m_device);
img_xfer_src.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_xfer_src.initialized());
VkImageObj img_xfer_dst(m_device);
img_xfer_dst.Init(128, 128, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_xfer_dst.initialized());
VkImageObj img_sampled(m_device);
img_sampled.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_sampled.initialized());
VkImageObj img_input(m_device);
img_input.Init(128, 128, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(img_input.initialized());
const struct {
VkImageObj &image_obj;
VkImageLayout old_layout;
VkImageLayout new_layout;
} buffer_layouts[] = {
// clang-format off
{img_color, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_ds1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_ds2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_sampled, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_input, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_xfer_src, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
{img_xfer_dst, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL},
// clang-format on
};
const uint32_t layout_count = sizeof(buffer_layouts) / sizeof(buffer_layouts[0]);
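// Transition each image from a layout matching its usage to GENERAL and back; all barriers should be accepted.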
m_commandBuffer->begin();
for (uint32_t i = 0; i < layout_count; ++i) {
img_barrier.image = buffer_layouts[i].image_obj.handle();
const VkImageUsageFlags usage = buffer_layouts[i].image_obj.usage();
img_barrier.subresourceRange.aspectMask = (usage == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
? (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)
: VK_IMAGE_ASPECT_COLOR_BIT;
img_barrier.oldLayout = buffer_layouts[i].old_layout;
img_barrier.newLayout = buffer_layouts[i].new_layout;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
img_barrier.oldLayout = buffer_layouts[i].new_layout;
img_barrier.newLayout = buffer_layouts[i].old_layout;
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, 0, 0,
nullptr, 0, nullptr, 1, &img_barrier);
}
m_commandBuffer->end();
img_barrier.oldLayout = VK_IMAGE_LAYOUT_GENERAL;
img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
}
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, WaitEventThenSet) {
TEST_DESCRIPTION("Wait on a event then set it after the wait has been submitted.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
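// The command buffer waits on the event and then resets it; the host sets the event only after the submit below.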
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer, &begin_info);
vkCmdWaitEvents(command_buffer, 1, &event, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, nullptr, 0,
nullptr, 0, nullptr);
vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
vkEndCommandBuffer(command_buffer);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{ vkSetEvent(m_device->device(), event); }
vkQueueWaitIdle(queue);
vkDestroyEvent(m_device->device(), event, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, QueryAndCopySecondaryCommandBuffers) {
TEST_DESCRIPTION("Issue a query on a secondary command buffer and copy it on a primary.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
VkCommandPoolObj command_pool(m_device, m_device->graphics_queue_node_index_, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT);
VkCommandBufferObj primary_buffer(m_device, &command_pool);
VkCommandBufferObj secondary_buffer(m_device, &command_pool, VK_COMMAND_BUFFER_LEVEL_SECONDARY);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
uint32_t qfi = 0;
VkBufferCreateInfo buff_create_info = {};
buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_create_info.size = 1024;
buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buff_create_info.queueFamilyIndexCount = 1;
buff_create_info.pQueueFamilyIndices = &qfi;
VkResult err;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memReqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = memReqs.size;
mem_alloc.memoryTypeIndex = 0;
bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
VkCommandBufferInheritanceInfo hinfo = {};
hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
hinfo.renderPass = VK_NULL_HANDLE;
hinfo.subpass = 0;
hinfo.framebuffer = VK_NULL_HANDLE;
hinfo.occlusionQueryEnable = VK_FALSE;
hinfo.queryFlags = 0;
hinfo.pipelineStatistics = 0;
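// The secondary command buffer resets the query pool and writes a timestamp; the primary executes it and copies the result into the buffer.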
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.pInheritanceInfo = &hinfo;
secondary_buffer.begin(&begin_info);
vkCmdResetQueryPool(secondary_buffer.handle(), query_pool, 0, 1);
vkCmdWriteTimestamp(secondary_buffer.handle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0);
secondary_buffer.end();
primary_buffer.begin();
vkCmdExecuteCommands(primary_buffer.handle(), 1, &secondary_buffer.handle());
vkCmdCopyQueryPoolResults(primary_buffer.handle(), query_pool, 0, 1, buffer, 0, 0, 0);
primary_buffer.end();
}
primary_buffer.QueueCommandBuffer();
vkQueueWaitIdle(queue);
vkDestroyQueryPool(m_device->device(), query_pool, nullptr);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, QueryAndCopyMultipleCommandBuffers) {
TEST_DESCRIPTION("Issue a query and copy from it on a second command buffer.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkQueryPool query_pool;
VkQueryPoolCreateInfo query_pool_create_info{};
query_pool_create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
query_pool_create_info.queryType = VK_QUERY_TYPE_TIMESTAMP;
query_pool_create_info.queryCount = 1;
vkCreateQueryPool(m_device->device(), &query_pool_create_info, nullptr, &query_pool);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
uint32_t qfi = 0;
VkBufferCreateInfo buff_create_info = {};
buff_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buff_create_info.size = 1024;
buff_create_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
buff_create_info.queueFamilyIndexCount = 1;
buff_create_info.pQueueFamilyIndices = &qfi;
VkResult err;
VkBuffer buffer;
err = vkCreateBuffer(m_device->device(), &buff_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memReqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memReqs);
VkMemoryAllocateInfo mem_alloc = {};
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
mem_alloc.allocationSize = memReqs.size;
mem_alloc.memoryTypeIndex = 0;
bool pass = m_device->phy().set_memory_type(memReqs.memoryTypeBits, &mem_alloc, 0);
if (!pass) {
printf("%s Failed to allocate memory.\n", kSkipPrefix);
vkDestroyBuffer(m_device->device(), buffer, NULL);
return;
}
VkDeviceMemory mem;
err = vkAllocateMemory(m_device->device(), &mem_alloc, NULL, &mem);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, mem, 0);
ASSERT_VK_SUCCESS(err);
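// The first command buffer resets the pool and writes a timestamp; the second copies the result; both are submitted in a single batch.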
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdResetQueryPool(command_buffer[0], query_pool, 0, 1);
vkCmdWriteTimestamp(command_buffer[0], VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, query_pool, 0);
vkEndCommandBuffer(command_buffer[0]);
vkBeginCommandBuffer(command_buffer[1], &begin_info);
vkCmdCopyQueryPoolResults(command_buffer[1], query_pool, 0, 1, buffer, 0, 0, 0);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 2;
submit_info.pCommandBuffers = command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
vkQueueWaitIdle(queue);
vkDestroyQueryPool(m_device->device(), query_pool, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, command_buffer);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), mem, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, ResetEventThenSet) {
TEST_DESCRIPTION("Reset an event then set it after the reset has been submitted.");
ASSERT_NO_FATAL_FAILURE(Init());
VkEvent event;
VkEventCreateInfo event_create_info{};
event_create_info.sType = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
vkCreateEvent(m_device->device(), &event_create_info, nullptr, &event);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer;
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 1;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, &command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
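// Record and submit a reset of the event, then set it from the host while the reset is still pending; the layer should report the in-use error.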
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer, &begin_info);
vkCmdResetEvent(command_buffer, event, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
vkEndCommandBuffer(command_buffer);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "that is already in use by a command buffer.");
vkSetEvent(m_device->device(), event);
m_errorMonitor->VerifyFound();
}
vkQueueWaitIdle(queue);
vkDestroyEvent(m_device->device(), event, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 1, &command_buffer);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoFencesThreeFrames) {
TEST_DESCRIPTION(
"Two command buffers with two separate fences are each run through a Submit & WaitForFences cycle 3 times. This previously "
"revealed a bug so running this positive test to prevent a regression.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 0, &queue);
static const uint32_t NUM_OBJECTS = 2;
static const uint32_t NUM_FRAMES = 3;
VkCommandBuffer cmd_buffers[NUM_OBJECTS] = {};
VkFence fences[NUM_OBJECTS] = {};
VkCommandPool cmd_pool;
VkCommandPoolCreateInfo cmd_pool_ci = {};
cmd_pool_ci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
cmd_pool_ci.queueFamilyIndex = m_device->graphics_queue_node_index_;
cmd_pool_ci.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
VkResult err = vkCreateCommandPool(m_device->device(), &cmd_pool_ci, nullptr, &cmd_pool);
ASSERT_VK_SUCCESS(err);
VkCommandBufferAllocateInfo cmd_buf_info = {};
cmd_buf_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
cmd_buf_info.commandPool = cmd_pool;
cmd_buf_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
cmd_buf_info.commandBufferCount = 1;
VkFenceCreateInfo fence_ci = {};
fence_ci.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_ci.pNext = nullptr;
fence_ci.flags = 0;
for (uint32_t i = 0; i < NUM_OBJECTS; ++i) {
err = vkAllocateCommandBuffers(m_device->device(), &cmd_buf_info, &cmd_buffers[i]);
ASSERT_VK_SUCCESS(err);
err = vkCreateFence(m_device->device(), &fence_ci, nullptr, &fences[i]);
ASSERT_VK_SUCCESS(err);
}
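// Each frame: record an empty command buffer, submit it with its fence, wait on the fence, then reset the fence.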
for (uint32_t frame = 0; frame < NUM_FRAMES; ++frame) {
for (uint32_t obj = 0; obj < NUM_OBJECTS; ++obj) {
// Create empty cmd buffer
VkCommandBufferBeginInfo cmdBufBeginDesc = {};
cmdBufBeginDesc.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
err = vkBeginCommandBuffer(cmd_buffers[obj], &cmdBufBeginDesc);
ASSERT_VK_SUCCESS(err);
err = vkEndCommandBuffer(cmd_buffers[obj]);
ASSERT_VK_SUCCESS(err);
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &cmd_buffers[obj];
// Submit cmd buffer and wait for fence
err = vkQueueSubmit(queue, 1, &submit_info, fences[obj]);
ASSERT_VK_SUCCESS(err);
err = vkWaitForFences(m_device->device(), 1, &fences[obj], VK_TRUE, UINT64_MAX);
ASSERT_VK_SUCCESS(err);
err = vkResetFences(m_device->device(), 1, &fences[obj]);
ASSERT_VK_SUCCESS(err);
}
}
m_errorMonitor->VerifyNotFound();
vkDestroyCommandPool(m_device->device(), cmd_pool, NULL);
for (uint32_t i = 0; i < NUM_OBJECTS; ++i) {
vkDestroyFence(m_device->device(), fences[i], nullptr);
}
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWI) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call submitted on separate queues followed by a QueueWaitIdle.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
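// The first submission signals the semaphore on the second queue; the second submission waits on it on the default queue before QueueWaitIdle.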
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
}
vkQueueWaitIdle(m_device->m_queue);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceQWIFence) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed "
"by a QueueWaitIdle.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
}
vkQueueWaitIdle(m_device->m_queue);
vkDestroyFence(m_device->device(), fence, nullptr);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFenceTwoWFF) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence followed "
"by two consecutive WaitForFences calls on the same fence.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
}
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkDestroyFence(m_device->device(), fence, nullptr);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, TwoQueuesEnsureCorrectRetirementWithWorkStolen) {
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Test requires two queues, skipping\n", kSkipPrefix);
return;
}
VkResult err;
m_errorMonitor->ExpectSuccess();
VkQueue q0 = m_device->m_queue;
VkQueue q1 = nullptr;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &q1);
ASSERT_NE(q1, nullptr);
// An (empty) command buffer. We must have work in the first submission --
// the layer treats unfenced work differently from fenced work.
VkCommandPoolCreateInfo cpci = {VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, 0, 0};
VkCommandPool pool;
err = vkCreateCommandPool(m_device->device(), &cpci, nullptr, &pool);
ASSERT_VK_SUCCESS(err);
VkCommandBufferAllocateInfo cbai = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, nullptr, pool,
VK_COMMAND_BUFFER_LEVEL_PRIMARY, 1};
VkCommandBuffer cb;
err = vkAllocateCommandBuffers(m_device->device(), &cbai, &cb);
ASSERT_VK_SUCCESS(err);
VkCommandBufferBeginInfo cbbi = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr, 0, nullptr};
err = vkBeginCommandBuffer(cb, &cbbi);
ASSERT_VK_SUCCESS(err);
err = vkEndCommandBuffer(cb);
ASSERT_VK_SUCCESS(err);
// A semaphore
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
VkSemaphore s;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &s);
ASSERT_VK_SUCCESS(err);
// First submission, to q0
VkSubmitInfo s0 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, nullptr, 1, &cb, 1, &s};
err = vkQueueSubmit(q0, 1, &s0, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
// Second submission, to q1, waiting on s
VkFlags waitmask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT; // doesn't really matter what this value is.
VkSubmitInfo s1 = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &s, &waitmask, 0, nullptr, 0, nullptr};
err = vkQueueSubmit(q1, 1, &s1, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
// Wait for q0 idle
err = vkQueueWaitIdle(q0);
ASSERT_VK_SUCCESS(err);
// Command buffer should have been completed (it was on q0); reset the pool.
vkFreeCommandBuffers(m_device->device(), pool, 1, &cb);
m_errorMonitor->VerifyNotFound();
// Force device completely idle and clean up resources
vkDeviceWaitIdle(m_device->device());
vkDestroyCommandPool(m_device->device(), pool, nullptr);
vkDestroySemaphore(m_device->device(), s, nullptr);
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsSeparateQueuesWithSemaphoreAndOneFence) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call submitted on separate queues, the second having a fence, "
"followed by a WaitForFences call.");
ASSERT_NO_FATAL_FAILURE(Init());
if ((m_device->queue_props.empty()) || (m_device->queue_props[0].queueCount < 2)) {
printf("%s Queue family needs to have multiple queues to run this test.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
VkQueue queue = VK_NULL_HANDLE;
vkGetDeviceQueue(m_device->device(), m_device->graphics_queue_node_index_, 1, &queue);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
}
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkDestroyFence(m_device->device(), fence, nullptr);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueWithSemaphoreAndOneFence) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call on the same queue, sharing a signal/wait semaphore, the second "
"having a fence, followed by a WaitForFences call.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &semaphore;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &semaphore;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
}
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkDestroyFence(m_device->device(), fence, nullptr);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueNullQueueSubmitWithFence) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call on the same queue, no fences, followed by a third QueueSubmit "
"with NO SubmitInfos but with a fence, followed by a WaitForFences call.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = nullptr;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
}
vkQueueSubmit(m_device->m_queue, 0, NULL, fence);
VkResult err = vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
ASSERT_VK_SUCCESS(err);
vkDestroyFence(m_device->device(), fence, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoQueueSubmitsOneQueueOneFence) {
TEST_DESCRIPTION(
"Two command buffers, each in a separate QueueSubmit call on the same queue, the second having a fence, followed by a "
"WaitForFences call.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[0];
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = nullptr;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, VK_NULL_HANDLE);
}
{
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
VkSubmitInfo submit_info{};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &command_buffer[1];
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = nullptr;
submit_info.pWaitDstStageMask = flags;
vkQueueSubmit(m_device->m_queue, 1, &submit_info, fence);
}
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkDestroyFence(m_device->device(), fence, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
m_errorMonitor->VerifyNotFound();
}
// This is a positive test. No errors should be generated.
TEST_F(VkPositiveLayerTest, TwoSubmitInfosWithSemaphoreOneQueueSubmitsOneFence) {
TEST_DESCRIPTION(
"Two command buffers each in a separate SubmitInfo sent in a single QueueSubmit call followed by a WaitForFences call.");
ASSERT_NO_FATAL_FAILURE(Init());
m_errorMonitor->ExpectSuccess();
VkFence fence;
VkFenceCreateInfo fence_create_info{};
fence_create_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
vkCreateFence(m_device->device(), &fence_create_info, nullptr, &fence);
VkSemaphore semaphore;
VkSemaphoreCreateInfo semaphore_create_info{};
semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
vkCreateSemaphore(m_device->device(), &semaphore_create_info, nullptr, &semaphore);
VkCommandPool command_pool;
VkCommandPoolCreateInfo pool_create_info{};
pool_create_info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
pool_create_info.queueFamilyIndex = m_device->graphics_queue_node_index_;
pool_create_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
vkCreateCommandPool(m_device->device(), &pool_create_info, nullptr, &command_pool);
VkCommandBuffer command_buffer[2];
VkCommandBufferAllocateInfo command_buffer_allocate_info{};
command_buffer_allocate_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
command_buffer_allocate_info.commandPool = command_pool;
command_buffer_allocate_info.commandBufferCount = 2;
command_buffer_allocate_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
vkAllocateCommandBuffers(m_device->device(), &command_buffer_allocate_info, command_buffer);
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[0], &begin_info);
vkCmdPipelineBarrier(command_buffer[0], VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 0, nullptr);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[0], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[0]);
}
{
VkCommandBufferBeginInfo begin_info{};
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
vkBeginCommandBuffer(command_buffer[1], &begin_info);
VkViewport viewport{};
viewport.maxDepth = 1.0f;
viewport.minDepth = 0.0f;
viewport.width = 512;
viewport.height = 512;
viewport.x = 0;
viewport.y = 0;
vkCmdSetViewport(command_buffer[1], 0, 1, &viewport);
vkEndCommandBuffer(command_buffer[1]);
}
{
VkSubmitInfo submit_info[2];
VkPipelineStageFlags flags[]{VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
submit_info[0].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[0].pNext = NULL;
submit_info[0].commandBufferCount = 1;
submit_info[0].pCommandBuffers = &command_buffer[0];
submit_info[0].signalSemaphoreCount = 1;
submit_info[0].pSignalSemaphores = &semaphore;
submit_info[0].waitSemaphoreCount = 0;
submit_info[0].pWaitSemaphores = NULL;
submit_info[0].pWaitDstStageMask = NULL;
submit_info[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info[1].pNext = NULL;
submit_info[1].commandBufferCount = 1;
submit_info[1].pCommandBuffers = &command_buffer[1];
submit_info[1].waitSemaphoreCount = 1;
submit_info[1].pWaitSemaphores = &semaphore;
submit_info[1].pWaitDstStageMask = flags;
submit_info[1].signalSemaphoreCount = 0;
submit_info[1].pSignalSemaphores = NULL;
vkQueueSubmit(m_device->m_queue, 2, &submit_info[0], fence);
}
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
vkDestroyFence(m_device->device(), fence, nullptr);
vkFreeCommandBuffers(m_device->device(), command_pool, 2, &command_buffer[0]);
vkDestroyCommandPool(m_device->device(), command_pool, NULL);
vkDestroySemaphore(m_device->device(), semaphore, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, RenderPassSecondaryCommandBuffersMultipleTimes) {
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyNotFound();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &m_renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
m_errorMonitor->VerifyNotFound();
vkCmdEndRenderPass(m_commandBuffer->handle());
m_errorMonitor->VerifyNotFound();
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ValidRenderPassAttachmentLayoutWithLoadOp) {
TEST_DESCRIPTION(
"Positive test where we create a renderpass with an attachment that uses LOAD_OP_CLEAR, the first subpass has a valid "
"layout, and a second subpass then uses a valid *READ_ONLY* layout.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkAttachmentReference attach[2] = {};
attach[0].attachment = 0;
attach[0].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attach[1].attachment = 0;
attach[1].layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
VkSubpassDescription subpasses[2] = {};
// First subpass clears DS attach on load
subpasses[0].pDepthStencilAttachment = &attach[0];
// 2nd subpass reads in DS as input attachment
subpasses[1].inputAttachmentCount = 1;
subpasses[1].pInputAttachments = &attach[1];
VkAttachmentDescription attach_desc = {};
attach_desc.format = depth_format;
attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
attach_desc.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
attach_desc.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attach_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attach_desc.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attach_desc.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attach_desc.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
VkRenderPassCreateInfo rpci = {};
rpci.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rpci.attachmentCount = 1;
rpci.pAttachments = &attach_desc;
rpci.subpassCount = 2;
rpci.pSubpasses = subpasses;
// Now create RenderPass and verify no errors
VkRenderPass rp;
vkCreateRenderPass(m_device->device(), &rpci, NULL, &rp);
m_errorMonitor->VerifyNotFound();
vkDestroyRenderPass(m_device->device(), rp, NULL);
}
TEST_F(VkPositiveLayerTest, RenderPassDepthStencilLayoutTransition) {
TEST_DESCRIPTION(
"Create a render pass with depth-stencil attachment where layout transition from UNDEFINED TO DS_READ_ONLY_OPTIMAL is set "
"by render pass and verify that transition has correctly occurred at queue submit time with no validation errors.");
ASSERT_NO_FATAL_FAILURE(Init());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkImageFormatProperties format_props;
vkGetPhysicalDeviceImageFormatProperties(gpu(), depth_format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL,
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, 0, &format_props);
if (format_props.maxExtent.width < 32 || format_props.maxExtent.height < 32) {
printf("%s Depth extent too small, RenderPassDepthStencilLayoutTransition skipped.\n", kSkipPrefix);
return;
}
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
// A renderpass with one depth/stencil attachment.
VkAttachmentDescription attachment = {0,
depth_format,
VK_SAMPLE_COUNT_1_BIT,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_ATTACHMENT_LOAD_OP_DONT_CARE,
VK_ATTACHMENT_STORE_OP_DONT_CARE,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
VkAttachmentReference att_ref = {0, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 0, nullptr, nullptr, &att_ref, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, &attachment, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// A compatible ds image.
VkImageObj image(m_device);
image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
ASSERT_TRUE(image.initialized());
VkImageViewCreateInfo ivci = {
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
nullptr,
0,
image.handle(),
VK_IMAGE_VIEW_TYPE_2D,
depth_format,
{VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY, VK_COMPONENT_SWIZZLE_IDENTITY,
VK_COMPONENT_SWIZZLE_IDENTITY},
{VK_IMAGE_ASPECT_DEPTH_BIT, 0, 1, 0, 1},
};
VkImageView view;
err = vkCreateImageView(m_device->device(), &ivci, nullptr, &view);
ASSERT_VK_SUCCESS(err);
VkFramebufferCreateInfo fci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &view, 32, 32, 1};
VkFramebuffer fb;
err = vkCreateFramebuffer(m_device->device(), &fci, nullptr, &fb);
ASSERT_VK_SUCCESS(err);
VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{0, 0}, {32, 32}}, 0, nullptr};
m_commandBuffer->begin();
vkCmdBeginRenderPass(m_commandBuffer->handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);
vkCmdEndRenderPass(m_commandBuffer->handle());
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer(false);
m_errorMonitor->VerifyNotFound();
// Cleanup
vkDestroyImageView(m_device->device(), view, NULL);
vkDestroyRenderPass(m_device->device(), rp, NULL);
vkDestroyFramebuffer(m_device->device(), fb, NULL);
}
TEST_F(VkPositiveLayerTest, CreatePipelineAttribMatrixType) {
TEST_DESCRIPTION("Test that pipeline validation accepts matrices passed as vertex attributes");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attribs[2];
memset(input_attribs, 0, sizeof(input_attribs));
for (int i = 0; i < 2; i++) {
input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT;
input_attribs[i].location = i;
}
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in mat2x4 x;\n"
"void main(){\n"
" gl_Position = x[0] + x[1];\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(input_attribs, 2);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
/* expect success */
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineAttribArrayType) {
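TEST_DESCRIPTION("Test that pipeline validation accepts a vertex attribute consumed as an array of vectors in the vertex shader");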
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attribs[2];
memset(input_attribs, 0, sizeof(input_attribs));
for (int i = 0; i < 2; i++) {
input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT;
input_attribs[i].location = i;
}
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in vec4 x[2];\n"
"void main(){\n"
" gl_Position = x[0] + x[1];\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(input_attribs, 2);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineAttribComponents) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts consuming a vertex attribute through multiple vertex shader inputs, each consuming "
"a different subset of the components.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkVertexInputBindingDescription input_binding;
memset(&input_binding, 0, sizeof(input_binding));
VkVertexInputAttributeDescription input_attribs[3];
memset(input_attribs, 0, sizeof(input_attribs));
for (int i = 0; i < 3; i++) {
input_attribs[i].format = VK_FORMAT_R32G32B32A32_SFLOAT;
input_attribs[i].location = i;
}
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in vec4 x;\n"
"layout(location=1) in vec3 y1;\n"
"layout(location=1, component=3) in float y2;\n"
"layout(location=2) in vec4 z;\n"
"void main(){\n"
" gl_Position = x + vec4(y1, y2) + z;\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(&input_binding, 1);
pipe.AddVertexInputAttribs(input_attribs, 3);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineSimplePositive) {
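TEST_DESCRIPTION("Verify that creating a simple graphics pipeline with minimal vertex and fragment shaders generates no validation errors");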
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"void main(){\n"
" gl_Position = vec4(0);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineRelaxedTypeMatch) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts the relaxed type matching rules set out in 14.1.3: fundamental type must match, and "
"producer side must have at least as many components");
m_errorMonitor->ExpectSuccess();
// VK 1.0.8 Specification, 14.1.3 "Additionally,..." block
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
char const *vsSource =
"#version 450\n"
"layout(location=0) out vec3 x;\n"
"layout(location=1) out ivec3 y;\n"
"layout(location=2) out vec3 z;\n"
"void main(){\n"
" gl_Position = vec4(0);\n"
" x = vec3(0); y = ivec3(0); z = vec3(0);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"layout(location=0) in float x;\n"
"layout(location=1) flat in int y;\n"
"layout(location=2) in vec2 z;\n"
"void main(){\n"
" color = vec4(1 + x + y + z.x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkResult err = pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
ASSERT_VK_SUCCESS(err);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineTessPerVertex) {
TEST_DESCRIPTION("Test that pipeline validation accepts per-vertex variables passed between the TCS and TES stages");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().tessellationShader) {
printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
return;
}
char const *vsSource =
"#version 450\n"
"void main(){}\n";
char const *tcsSource =
"#version 450\n"
"layout(location=0) out int x[];\n"
"layout(vertices=3) out;\n"
"void main(){\n"
" gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;\n"
" gl_TessLevelInner[0] = 1;\n"
" x[gl_InvocationID] = gl_InvocationID;\n"
"}\n";
char const *tesSource =
"#version 450\n"
"layout(triangles, equal_spacing, cw) in;\n"
"layout(location=0) in int x[];\n"
"void main(){\n"
" gl_Position.xyz = gl_TessCoord;\n"
" gl_Position.w = x[0] + x[1] + x[2];\n"
"}\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj tcs(m_device, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, this);
VkShaderObj tes(m_device, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
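// Tessellation requires a patch-list primitive topology and a VkPipelineTessellationStateCreateInfo
// providing patchControlPoints (3 here).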
VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};
VkPipelineObj pipe(m_device);
pipe.SetInputAssembly(&iasci);
pipe.SetTessellation(&tsci);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&tcs);
pipe.AddShader(&tes);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineGeometryInputBlockPositive) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts a user-defined interface block passed into the geometry shader. This is interesting "
"because the 'extra' array level is not present on the member type, but on the block instance.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().geometryShader) {
printf("%s Device does not support geometry shaders; skipped.\n", kSkipPrefix);
return;
}
char const *vsSource =
"#version 450\n"
"layout(location=0) out VertexData { vec4 x; } vs_out;\n"
"void main(){\n"
" vs_out.x = vec4(1);\n"
"}\n";
char const *gsSource =
"#version 450\n"
"layout(triangles) in;\n"
"layout(triangle_strip, max_vertices=3) out;\n"
"layout(location=0) in VertexData { vec4 x; } gs_in[];\n"
"void main() {\n"
" gl_Position = gs_in[0].x;\n"
" EmitVertex();\n"
"}\n";
char const *fsSource =
"#version 450\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj gs(m_device, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&gs);
pipe.AddShader(&fs);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipeline64BitAttributesPositive) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts basic use of 64bit vertex attributes. This is interesting because they consume "
"multiple locations.");
m_errorMonitor->ExpectSuccess();
if (!EnableDeviceProfileLayer()) {
printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
ASSERT_NO_FATAL_FAILURE(InitState());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
if (!m_device->phy().features().shaderFloat64) {
printf("%s Device does not support 64bit vertex attributes; skipped.\n", kSkipPrefix);
return;
}
// Use the device profile layer to make the 64-bit format report vertex buffer support
PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;
// Load required functions
if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
return;
}
VkFormatProperties format_props;
fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, &format_props);
format_props.bufferFeatures |= VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT;
fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), VK_FORMAT_R64G64B64A64_SFLOAT, format_props);
VkVertexInputBindingDescription input_bindings[1];
memset(input_bindings, 0, sizeof(input_bindings));
VkVertexInputAttributeDescription input_attribs[4];
memset(input_attribs, 0, sizeof(input_attribs));
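// A dmat4 attribute is four dvec4 columns; each 64-bit dvec4 occupies two locations and 32 bytes,
// hence locations 0/2/4/6 and offsets 0/32/64/96 below.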
input_attribs[0].location = 0;
input_attribs[0].offset = 0;
input_attribs[0].format = VK_FORMAT_R64G64B64A64_SFLOAT;
input_attribs[1].location = 2;
input_attribs[1].offset = 32;
input_attribs[1].format = VK_FORMAT_R64G64B64A64_SFLOAT;
input_attribs[2].location = 4;
input_attribs[2].offset = 64;
input_attribs[2].format = VK_FORMAT_R64G64B64A64_SFLOAT;
input_attribs[3].location = 6;
input_attribs[3].offset = 96;
input_attribs[3].format = VK_FORMAT_R64G64B64A64_SFLOAT;
char const *vsSource =
"#version 450\n"
"\n"
"layout(location=0) in dmat4 x;\n"
"void main(){\n"
" gl_Position = vec4(x[0][0]);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(location=0) out vec4 color;\n"
"void main(){\n"
" color = vec4(1);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddVertexInputBindings(input_bindings, 1);
pipe.AddVertexInputAttribs(input_attribs, 4);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass());
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, CreatePipelineInputAttachmentPositive) {
TEST_DESCRIPTION("Positive test for a correctly matched input attachment");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
char const *vsSource =
"#version 450\n"
"\n"
"void main(){\n"
" gl_Position = vec4(1);\n"
"}\n";
char const *fsSource =
"#version 450\n"
"\n"
"layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;\n"
"layout(location=0) out vec4 color;\n"
"void main() {\n"
" color = subpassLoad(x);\n"
"}\n";
VkShaderObj vs(m_device, vsSource, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, this);
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
const VkPipelineLayoutObj pl(m_device, {&dsl});
VkAttachmentDescription descs[2] = {
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
{0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE,
VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL},
};
VkAttachmentReference color = {
0,
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
};
VkAttachmentReference input = {
1,
VK_IMAGE_LAYOUT_GENERAL,
};
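// A single subpass that consumes attachment 1 as an input attachment and writes attachment 0 as color.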
VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr};
VkRenderPass rp;
VkResult err = vkCreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);
// Pipeline creation should succeed; an input attachment mismatch would be reported here if present.
pipe.CreateVKPipeline(pl.handle(), rp);
m_errorMonitor->VerifyNotFound();
vkDestroyRenderPass(m_device->device(), rp, nullptr);
}
TEST_F(VkPositiveLayerTest, CreateComputePipelineMissingDescriptorUnusedPositive) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts a compute pipeline which declares a descriptor-backed resource which is not "
"provided, but the shader does not statically use it. This is interesting because it requires compute pipelines to have a "
"proper descriptor use walk, which they didn't for some time.");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) buffer block { vec4 x; };\n"
"void main(){\n"
" // x is not used.\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
descriptorSet.GetPipelineLayout(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyNotFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsSampler) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts a shader consuming only the sampler portion of a combined image + sampler");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
std::vector<VkDescriptorSetLayoutBinding> bindings = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
};
const VkDescriptorSetLayoutObj dsl(m_device, bindings);
const VkPipelineLayoutObj pl(m_device, {&dsl});
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) uniform sampler s;\n"
"layout(set=0, binding=1) uniform texture2D t;\n"
"layout(set=0, binding=2) buffer block { vec4 x; };\n"
"void main() {\n"
" x = texture(sampler2D(t, s), vec2(0));\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
pl.handle(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyNotFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsImage) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts a shader consuming only the image portion of a combined image + sampler");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
std::vector<VkDescriptorSetLayoutBinding> bindings = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{2, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
};
const VkDescriptorSetLayoutObj dsl(m_device, bindings);
const VkPipelineLayoutObj pl(m_device, {&dsl});
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) uniform texture2D t;\n"
"layout(set=0, binding=1) uniform sampler s;\n"
"layout(set=0, binding=2) buffer block { vec4 x; };\n"
"void main() {\n"
" x = texture(sampler2D(t, s), vec2(0));\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
pl.handle(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyNotFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkPositiveLayerTest, CreateComputePipelineCombinedImageSamplerConsumedAsBoth) {
TEST_DESCRIPTION(
"Test that pipeline validation accepts a shader consuming both the sampler and the image of a combined image+sampler but "
"via separate variables");
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
std::vector<VkDescriptorSetLayoutBinding> bindings = {
{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
{1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
};
const VkDescriptorSetLayoutObj dsl(m_device, bindings);
const VkPipelineLayoutObj pl(m_device, {&dsl});
char const *csSource =
"#version 450\n"
"\n"
"layout(local_size_x=1) in;\n"
"layout(set=0, binding=0) uniform texture2D t;\n"
"layout(set=0, binding=0) uniform sampler s; // both binding 0!\n"
"layout(set=0, binding=1) buffer block { vec4 x; };\n"
"void main() {\n"
" x = texture(sampler2D(t, s), vec2(0));\n"
"}\n";
VkShaderObj cs(m_device, csSource, VK_SHADER_STAGE_COMPUTE_BIT, this);
VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
nullptr,
0,
{VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
pl.handle(),
VK_NULL_HANDLE,
-1};
VkPipeline pipe;
VkResult err = vkCreateComputePipelines(m_device->device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
m_errorMonitor->VerifyNotFound();
if (err == VK_SUCCESS) {
vkDestroyPipeline(m_device->device(), pipe, nullptr);
}
}
TEST_F(VkPositiveLayerTest, CreateDescriptorSetBindingWithIgnoredSamplers) {
TEST_DESCRIPTION("Test that layers conditionally do ignore the pImmutableSamplers on vkCreateDescriptorSetLayout");
bool prop2_found = false;
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
prop2_found = true;
} else {
printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
bool push_descriptor_found = false;
if (prop2_found && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
push_descriptor_found = true;
} else {
printf("%s %s Extension not supported, skipping push descriptor sub-tests\n", kSkipPrefix,
VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitState());
const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD;
const uint64_t fake_address_32 = 0xCDCDCDCD;
const void *fake_pointer =
sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32);
const VkSampler *hopefully_undereferencable_pointer = reinterpret_cast<const VkSampler *>(fake_pointer);
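// The pointer above is deliberately bogus: for the non-sampler descriptor types used below the layers must
// ignore pImmutableSamplers, so any attempt to dereference it would fault.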
// regular descriptors
m_errorMonitor->ExpectSuccess();
{
const VkDescriptorSetLayoutBinding non_sampler_bindings[] = {
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{6, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{7, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{8, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
};
const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr, 0,
static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings};
VkDescriptorSetLayout dsl;
const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl);
ASSERT_VK_SUCCESS(err);
vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr);
}
m_errorMonitor->VerifyNotFound();
if (push_descriptor_found) {
// push descriptors
m_errorMonitor->ExpectSuccess();
{
const VkDescriptorSetLayoutBinding non_sampler_bindings[] = {
{0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{2, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{3, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{4, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{5, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
{6, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, hopefully_undereferencable_pointer},
};
const VkDescriptorSetLayoutCreateInfo dslci = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO, nullptr,
VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
static_cast<uint32_t>(size(non_sampler_bindings)), non_sampler_bindings};
VkDescriptorSetLayout dsl;
const VkResult err = vkCreateDescriptorSetLayout(m_device->device(), &dslci, nullptr, &dsl);
ASSERT_VK_SUCCESS(err);
vkDestroyDescriptorSetLayout(m_device->device(), dsl, nullptr);
}
m_errorMonitor->VerifyNotFound();
}
}
TEST_F(VkPositiveLayerTest, Maintenance1Tests) {
TEST_DESCRIPTION("Validate various special cases for the Maintenance1_KHR extension");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
} else {
printf("%s Maintenance1 Extension not supported, skipping tests\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess();
VkCommandBufferObj cmd_buf(m_device, m_commandPool);
cmd_buf.begin();
// A negative viewport height is valid when Maintenance1 is enabled; without the extension it would be an error
VkViewport viewport = {0, 0, 16, -16, 0, 1};
vkCmdSetViewport(cmd_buf.handle(), 0, 1, &viewport);
cmd_buf.end();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, DuplicateValidPNextStructures) {
TEST_DESCRIPTION("Create a pNext chain containing valid structures, but with a duplicate structure type");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME);
} else {
printf("%s VK_NV_dedicated_allocation extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Create two pNext structures which by themselves would be valid
VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {};
VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info_2 = {};
dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV;
dedicated_buffer_create_info.pNext = &dedicated_buffer_create_info_2;
dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE;
dedicated_buffer_create_info_2.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV;
dedicated_buffer_create_info_2.pNext = nullptr;
dedicated_buffer_create_info_2.dedicatedAllocation = VK_TRUE;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = &dedicated_buffer_create_info;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "chain contains duplicate structure types");
VkBuffer buffer;
vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
m_errorMonitor->VerifyFound();
}
TEST_F(VkLayerTest, DedicatedAllocation) {
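TEST_DESCRIPTION("Verify invalid and valid memory binds for buffers and images created with VK_KHR_dedicated_allocation");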
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
} else {
printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkMemoryPropertyFlags mem_flags = 0;
const VkDeviceSize resource_size = 1024;
auto buffer_info = VkBufferObj::create_info(resource_size, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
VkBufferObj buffer;
buffer.init_no_mem(*m_device, buffer_info);
auto buffer_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), mem_flags);
auto buffer_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>();
buffer_dedicated_info.buffer = buffer.handle();
buffer_alloc_info.pNext = &buffer_dedicated_info;
vk_testing::DeviceMemory dedicated_buffer_memory;
dedicated_buffer_memory.init(*m_device, buffer_alloc_info);
VkBufferObj wrong_buffer;
wrong_buffer.init_no_mem(*m_device, buffer_info);
// Bind with wrong buffer
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindBufferMemory-memory-01508");
vkBindBufferMemory(m_device->handle(), wrong_buffer.handle(), dedicated_buffer_memory.handle(), 0);
m_errorMonitor->VerifyFound();
// Bind with non-zero offset (same VUID)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkBindBufferMemory-memory-01508"); // offset must be zero
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkBindBufferMemory-size-01037"); // offset pushes us past size
auto offset = buffer.memory_requirements().alignment;
vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), offset);
m_errorMonitor->VerifyFound();
// Bind correctly (depends on the "skip" above)
m_errorMonitor->ExpectSuccess();
vkBindBufferMemory(m_device->handle(), buffer.handle(), dedicated_buffer_memory.handle(), 0);
m_errorMonitor->VerifyNotFound();
// And for images...
vk_testing::Image image;
vk_testing::Image wrong_image;
auto image_info = vk_testing::Image::create_info();
image_info.extent.width = resource_size;
image_info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
image.init_no_mem(*m_device, image_info);
wrong_image.init_no_mem(*m_device, image_info);
auto image_dedicated_info = lvl_init_struct<VkMemoryDedicatedAllocateInfoKHR>();
image_dedicated_info.image = image.handle();
auto image_alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), mem_flags);
image_alloc_info.pNext = &image_dedicated_info;
vk_testing::DeviceMemory dedicated_image_memory;
dedicated_image_memory.init(*m_device, image_alloc_info);
// Bind with wrong image
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkBindImageMemory-memory-01509");
vkBindImageMemory(m_device->handle(), wrong_image.handle(), dedicated_image_memory.handle(), 0);
m_errorMonitor->VerifyFound();
// Bind with non-zero offset (same VUID)
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkBindImageMemory-memory-01509"); // offset must be zero
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
"VUID-vkBindImageMemory-size-01049"); // offset pushes us past size
auto image_offset = image.memory_requirements().alignment;
vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), image_offset);
m_errorMonitor->VerifyFound();
// Bind correctly (depends on the "skip" above)
m_errorMonitor->ExpectSuccess();
vkBindImageMemory(m_device->handle(), image.handle(), dedicated_image_memory.handle(), 0);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ValidStructPNext) {
TEST_DESCRIPTION("Verify that a valid pNext value is handled correctly");
// Positive test to check parameter_validation and unique_objects support for NV_dedicated_allocation
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME);
} else {
printf("%s VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME Extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess();
VkDedicatedAllocationBufferCreateInfoNV dedicated_buffer_create_info = {};
dedicated_buffer_create_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV;
dedicated_buffer_create_info.pNext = nullptr;
dedicated_buffer_create_info.dedicatedAllocation = VK_TRUE;
uint32_t queue_family_index = 0;
VkBufferCreateInfo buffer_create_info = {};
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.pNext = &dedicated_buffer_create_info;
buffer_create_info.size = 1024;
buffer_create_info.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
buffer_create_info.queueFamilyIndexCount = 1;
buffer_create_info.pQueueFamilyIndices = &queue_family_index;
VkBuffer buffer;
VkResult err = vkCreateBuffer(m_device->device(), &buffer_create_info, NULL, &buffer);
ASSERT_VK_SUCCESS(err);
VkMemoryRequirements memory_reqs;
vkGetBufferMemoryRequirements(m_device->device(), buffer, &memory_reqs);
VkDedicatedAllocationMemoryAllocateInfoNV dedicated_memory_info = {};
dedicated_memory_info.sType = VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV;
dedicated_memory_info.pNext = nullptr;
dedicated_memory_info.buffer = buffer;
dedicated_memory_info.image = VK_NULL_HANDLE;
VkMemoryAllocateInfo memory_info = {};
memory_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memory_info.pNext = &dedicated_memory_info;
memory_info.allocationSize = memory_reqs.size;
bool pass;
pass = m_device->phy().set_memory_type(memory_reqs.memoryTypeBits, &memory_info, 0);
ASSERT_TRUE(pass);
VkDeviceMemory buffer_memory;
err = vkAllocateMemory(m_device->device(), &memory_info, NULL, &buffer_memory);
ASSERT_VK_SUCCESS(err);
err = vkBindBufferMemory(m_device->device(), buffer, buffer_memory, 0);
ASSERT_VK_SUCCESS(err);
vkDestroyBuffer(m_device->device(), buffer, NULL);
vkFreeMemory(m_device->device(), buffer_memory, NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, PSOPolygonModeValid) {
TEST_DESCRIPTION("Verify that using a solid polygon fill mode works correctly.");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
std::vector<const char *> device_extension_names;
auto features = m_device->phy().features();
// Artificially disable support for non-solid fill modes
features.fillModeNonSolid = false;
// The sacrificial device object
VkDeviceObj test_device(0, gpu(), device_extension_names, &features);
VkRenderpassObj render_pass(&test_device);
const VkPipelineLayoutObj pipeline_layout(&test_device);
VkPipelineRasterizationStateCreateInfo rs_ci = {};
rs_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rs_ci.pNext = nullptr;
rs_ci.lineWidth = 1.0f;
rs_ci.rasterizerDiscardEnable = false;
VkShaderObj vs(&test_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(&test_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
// Set polygonMode=FILL. No error is expected
m_errorMonitor->ExpectSuccess();
{
VkPipelineObj pipe(&test_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
// Set polygonMode to a good value
rs_ci.polygonMode = VK_POLYGON_MODE_FILL;
pipe.SetRasterization(&rs_ci);
pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
}
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, LongSemaphoreChain) {
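TEST_DESCRIPTION("Submit a long chain of dependent semaphore signals and waits and verify that it completes with no validation errors");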
m_errorMonitor->ExpectSuccess();
ASSERT_NO_FATAL_FAILURE(Init());
VkResult err;
std::vector<VkSemaphore> semaphores;
const int chainLength = 32768;
VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
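// Each submit waits on the semaphore signaled by the previous submit (none for the first) and signals a
// newly created semaphore, building one long dependency chain.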
for (int i = 0; i < chainLength; i++) {
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, 0};
VkSemaphore semaphore;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &semaphore);
ASSERT_VK_SUCCESS(err);
semaphores.push_back(semaphore);
VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO,
nullptr,
semaphores.size() > 1 ? 1u : 0u,
semaphores.size() > 1 ? &semaphores[semaphores.size() - 2] : nullptr,
&flags,
0,
nullptr,
1,
&semaphores[semaphores.size() - 1]};
err = vkQueueSubmit(m_device->m_queue, 1, &si, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
}
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
VkFence fence;
err = vkCreateFence(m_device->device(), &fci, nullptr, &fence);
ASSERT_VK_SUCCESS(err);
VkSubmitInfo si = {VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &semaphores.back(), &flags, 0, nullptr, 0, nullptr};
err = vkQueueSubmit(m_device->m_queue, 1, &si, fence);
ASSERT_VK_SUCCESS(err);
vkWaitForFences(m_device->device(), 1, &fence, VK_TRUE, UINT64_MAX);
for (auto semaphore : semaphores) vkDestroySemaphore(m_device->device(), semaphore, nullptr);
vkDestroyFence(m_device->device(), fence, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ExternalSemaphore) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external semaphore instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for external semaphore device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME);
} else {
printf("%s External semaphore extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external semaphore import and export capability
VkPhysicalDeviceExternalSemaphoreInfoKHR esi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR, nullptr,
handle_type};
VkExternalSemaphorePropertiesKHR esp = {VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalSemaphorePropertiesKHR =
(PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)vkGetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalSemaphorePropertiesKHR");
vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(gpu(), &esi, &esp);
if (!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(esp.externalSemaphoreFeatures & VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External semaphore does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
m_errorMonitor->ExpectSuccess();
// Create a semaphore to export payload from
VkExportSemaphoreCreateInfoKHR esci = {VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR, nullptr, handle_type};
VkSemaphoreCreateInfo sci = {VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, &esci, 0};
VkSemaphore export_semaphore;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &export_semaphore);
ASSERT_VK_SUCCESS(err);
// Create a semaphore to import payload into
sci.pNext = nullptr;
VkSemaphore import_semaphore;
err = vkCreateSemaphore(m_device->device(), &sci, nullptr, &import_semaphore);
ASSERT_VK_SUCCESS(err);
#ifdef _WIN32
// Export semaphore payload to an opaque handle
HANDLE handle = nullptr;
VkSemaphoreGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_semaphore,
handle_type};
auto vkGetSemaphoreWin32HandleKHR =
(PFN_vkGetSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreWin32HandleKHR");
err = vkGetSemaphoreWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above
VkImportSemaphoreWin32HandleInfoKHR ihi = {
VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR, nullptr, import_semaphore, 0, handle_type, handle, nullptr};
auto vkImportSemaphoreWin32HandleKHR =
(PFN_vkImportSemaphoreWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreWin32HandleKHR");
err = vkImportSemaphoreWin32HandleKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#else
// Export semaphore payload to an opaque handle
int fd = 0;
VkSemaphoreGetFdInfoKHR ghi = {VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR, nullptr, export_semaphore, handle_type};
auto vkGetSemaphoreFdKHR = (PFN_vkGetSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetSemaphoreFdKHR");
err = vkGetSemaphoreFdKHR(m_device->device(), &ghi, &fd);
ASSERT_VK_SUCCESS(err);
// Import opaque handle exported above
VkImportSemaphoreFdInfoKHR ihi = {
VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR, nullptr, import_semaphore, 0, handle_type, fd};
auto vkImportSemaphoreFdKHR = (PFN_vkImportSemaphoreFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportSemaphoreFdKHR");
err = vkImportSemaphoreFdKHR(m_device->device(), &ihi);
ASSERT_VK_SUCCESS(err);
#endif
// Signal the exported semaphore and wait on the imported semaphore
VkPipelineStageFlags flags = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
VkSubmitInfo si[] = {
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 0, nullptr, &flags, 0, nullptr, 1, &export_semaphore},
{VK_STRUCTURE_TYPE_SUBMIT_INFO, nullptr, 1, &import_semaphore, &flags, 0, nullptr, 0, nullptr},
};
err = vkQueueSubmit(m_device->m_queue, 4, si, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
if (m_device->phy().features().sparseBinding) {
// Signal the imported semaphore and wait on the exported semaphore
VkBindSparseInfo bi[] = {
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr, 1, &import_semaphore},
{VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, nullptr, 1, &export_semaphore, 0, nullptr, 0, nullptr, 0, nullptr, 0, nullptr},
};
err = vkQueueBindSparse(m_device->m_queue, 4, bi, VK_NULL_HANDLE);
ASSERT_VK_SUCCESS(err);
}
// Cleanup
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vkDestroySemaphore(m_device->device(), export_semaphore, nullptr);
vkDestroySemaphore(m_device->device(), import_semaphore, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ExternalFence) {
#ifdef _WIN32
const auto extension_name = VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto extension_name = VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
// Check for external fence instance extensions
if (InstanceExtensionSupported(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME);
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for external fence device extensions
if (DeviceExtensionSupported(gpu(), nullptr, extension_name)) {
m_device_extension_names.push_back(extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME);
} else {
printf("%s External fence extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
// Check for external fence import and export capability
VkPhysicalDeviceExternalFenceInfoKHR efi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR, nullptr, handle_type};
VkExternalFencePropertiesKHR efp = {VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR, nullptr};
auto vkGetPhysicalDeviceExternalFencePropertiesKHR = (PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)vkGetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalFencePropertiesKHR");
vkGetPhysicalDeviceExternalFencePropertiesKHR(gpu(), &efi, &efp);
if (!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR) ||
!(efp.externalFenceFeatures & VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External fence does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
VkResult err;
m_errorMonitor->ExpectSuccess();
// Create a fence to export payload from
VkFence export_fence;
{
VkExportFenceCreateInfoKHR efci = {VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR, nullptr, handle_type};
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, &efci, 0};
err = vkCreateFence(m_device->device(), &fci, nullptr, &export_fence);
ASSERT_VK_SUCCESS(err);
}
// Create a fence to import payload into
VkFence import_fence;
{
VkFenceCreateInfo fci = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, nullptr, 0};
err = vkCreateFence(m_device->device(), &fci, nullptr, &import_fence);
ASSERT_VK_SUCCESS(err);
}
#ifdef _WIN32
// Export fence payload to an opaque handle
HANDLE handle = nullptr;
{
VkFenceGetWin32HandleInfoKHR ghi = {VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceWin32HandleKHR =
(PFN_vkGetFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceWin32HandleKHR");
err = vkGetFenceWin32HandleKHR(m_device->device(), &ghi, &handle);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceWin32HandleInfoKHR ifi = {
VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR, nullptr, import_fence, 0, handle_type, handle, nullptr};
auto vkImportFenceWin32HandleKHR =
(PFN_vkImportFenceWin32HandleKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceWin32HandleKHR");
err = vkImportFenceWin32HandleKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#else
// Export fence payload to an opaque handle
int fd = 0;
{
VkFenceGetFdInfoKHR gfi = {VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR, nullptr, export_fence, handle_type};
auto vkGetFenceFdKHR = (PFN_vkGetFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkGetFenceFdKHR");
err = vkGetFenceFdKHR(m_device->device(), &gfi, &fd);
ASSERT_VK_SUCCESS(err);
}
// Import opaque handle exported above
{
VkImportFenceFdInfoKHR ifi = {VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR, nullptr, import_fence, 0, handle_type, fd};
auto vkImportFenceFdKHR = (PFN_vkImportFenceFdKHR)vkGetDeviceProcAddr(m_device->device(), "vkImportFenceFdKHR");
err = vkImportFenceFdKHR(m_device->device(), &ifi);
ASSERT_VK_SUCCESS(err);
}
#endif
// Signal the exported fence and wait on the imported fence
vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence);
vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000);
vkResetFences(m_device->device(), 1, &import_fence);
vkQueueSubmit(m_device->m_queue, 0, nullptr, export_fence);
vkWaitForFences(m_device->device(), 1, &import_fence, VK_TRUE, 1000000000);
vkResetFences(m_device->device(), 1, &import_fence);
// Signal the imported fence and wait on the exported fence
vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000);
vkResetFences(m_device->device(), 1, &export_fence);
vkQueueSubmit(m_device->m_queue, 0, nullptr, import_fence);
vkWaitForFences(m_device->device(), 1, &export_fence, VK_TRUE, 1000000000);
vkResetFences(m_device->device(), 1, &export_fence);
// Cleanup
err = vkQueueWaitIdle(m_device->m_queue);
ASSERT_VK_SUCCESS(err);
vkDestroyFence(m_device->device(), export_fence, nullptr);
vkDestroyFence(m_device->device(), import_fence, nullptr);
m_errorMonitor->VerifyNotFound();
}
extern "C" void *ReleaseNullFence(void *arg) {
struct thread_data_struct *data = (struct thread_data_struct *)arg;
for (int i = 0; i < 40000; i++) {
vkDestroyFence(data->device, VK_NULL_HANDLE, NULL);
if (data->bailout) {
break;
}
}
return NULL;
}
TEST_F(VkPositiveLayerTest, ThreadNullFenceCollision) {
test_platform_thread thread;
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "THREADING ERROR");
ASSERT_NO_FATAL_FAILURE(Init());
struct thread_data_struct data;
data.device = m_device->device();
data.bailout = false;
m_errorMonitor->SetBailout(&data.bailout);
// Call vkDestroyFence of VK_NULL_HANDLE repeatedly using multiple threads.
// There should be no validation error from collision of that non-object.
test_platform_thread_create(&thread, ReleaseNullFence, (void *)&data);
for (int i = 0; i < 40000; i++) {
vkDestroyFence(m_device->device(), VK_NULL_HANDLE, NULL);
}
test_platform_thread_join(thread, NULL);
m_errorMonitor->SetBailout(NULL);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, ClearColorImageWithValidRange) {
TEST_DESCRIPTION("Record clear color with a valid VkImageSubresourceRange");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
VkImageObj image(m_device);
image.Init(32, 32, 1, VK_FORMAT_B8G8R8A8_UNORM, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
image.SetLayout(VK_IMAGE_ASPECT_COLOR_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkClearColorValue clear_color = {{0.0f, 0.0f, 0.0f, 1.0f}};
m_commandBuffer->begin();
const auto cb_handle = m_commandBuffer->handle();
// Try good case
{
m_errorMonitor->ExpectSuccess();
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyNotFound();
}
// Try good case with VK_REMAINING
{
m_errorMonitor->ExpectSuccess();
VkImageSubresourceRange range = {VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
vkCmdClearColorImage(cb_handle, image.handle(), image.Layout(), &clear_color, 1, &range);
m_errorMonitor->VerifyNotFound();
}
}
TEST_F(VkPositiveLayerTest, ClearDepthStencilWithValidRange) {
TEST_DESCRIPTION("Record clear depth with a valid VkImageSubresourceRange");
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
auto depth_format = FindSupportedDepthStencilFormat(gpu());
if (!depth_format) {
printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
return;
}
VkImageObj image(m_device);
image.Init(32, 32, 1, depth_format, VK_IMAGE_USAGE_TRANSFER_DST_BIT, VK_IMAGE_TILING_OPTIMAL);
ASSERT_TRUE(image.create_info().arrayLayers == 1);
ASSERT_TRUE(image.initialized());
const VkImageAspectFlags ds_aspect = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
image.SetLayout(ds_aspect, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
const VkClearDepthStencilValue clear_value = {};
m_commandBuffer->begin();
const auto cb_handle = m_commandBuffer->handle();
// Try good case
{
m_errorMonitor->ExpectSuccess();
VkImageSubresourceRange range = {ds_aspect, 0, 1, 0, 1};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyNotFound();
}
// Try good case with VK_REMAINING
{
m_errorMonitor->ExpectSuccess();
VkImageSubresourceRange range = {ds_aspect, 0, VK_REMAINING_MIP_LEVELS, 0, VK_REMAINING_ARRAY_LAYERS};
vkCmdClearDepthStencilImage(cb_handle, image.handle(), image.Layout(), &clear_value, 1, &range);
m_errorMonitor->VerifyNotFound();
}
}
TEST_F(VkPositiveLayerTest, CreateGraphicsPipelineWithIgnoredPointers) {
TEST_DESCRIPTION("Create Graphics Pipeline with pointers that must be ignored by layers");
ASSERT_NO_FATAL_FAILURE(Init());
m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu());
ASSERT_TRUE(m_depth_stencil_fmt != 0);
m_depthStencil->Init(m_device, static_cast<int32_t>(m_width), static_cast<int32_t>(m_height), m_depth_stencil_fmt);
ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo()));
const uint64_t fake_address_64 = 0xCDCDCDCDCDCDCDCD;
const uint64_t fake_address_32 = 0xCDCDCDCD;
void *hopefully_undereferencable_pointer =
sizeof(void *) == 8 ? reinterpret_cast<void *>(fake_address_64) : reinterpret_cast<void *>(fake_address_32);
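// The pointer above is intentionally non-dereferenceable; it is passed in state fields that the spec requires
// implementations and layers to ignore for the pipeline configurations exercised below.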
VkShaderObj vs(m_device, "#version 450\nvoid main(){gl_Position = vec4(0.0, 0.0, 0.0, 1.0);}\n", VK_SHADER_STAGE_VERTEX_BIT,
this);
const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
0,
nullptr, // bindings
0,
nullptr // attributes
};
const VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
VK_FALSE // primitive restart
};
const VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info_template{
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
VK_FALSE, // depthClamp
VK_FALSE, // rasterizerDiscardEnable
VK_POLYGON_MODE_FILL,
VK_CULL_MODE_NONE,
VK_FRONT_FACE_COUNTER_CLOCKWISE,
VK_FALSE, // depthBias
0.0f,
0.0f,
0.0f, // depthBias params
1.0f // lineWidth
};
VkPipelineLayout pipeline_layout;
{
VkPipelineLayoutCreateInfo pipeline_layout_create_info{
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
nullptr, // pNext
0, // flags
0,
nullptr, // layouts
0,
nullptr // push constants
};
VkResult err = vkCreatePipelineLayout(m_device->device(), &pipeline_layout_create_info, nullptr, &pipeline_layout);
ASSERT_VK_SUCCESS(err);
}
// try disabled rasterizer and no tessellation
{
m_errorMonitor->ExpectSuccess();
VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info =
pipeline_rasterization_state_create_info_template;
pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_TRUE;
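// With rasterization discarded and no tessellation stages, the tessellation, viewport, multisample,
// depth-stencil, and color-blend state pointers must be ignored, so garbage pointers are legal here.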
VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
nullptr, // pNext
0, // flags
1, // stageCount
&vs.GetStageCreateInfo(),
&pipeline_vertex_input_state_create_info,
&pipeline_input_assembly_state_create_info,
reinterpret_cast<const VkPipelineTessellationStateCreateInfo *>(hopefully_undereferencable_pointer),
reinterpret_cast<const VkPipelineViewportStateCreateInfo *>(hopefully_undereferencable_pointer),
&pipeline_rasterization_state_create_info,
reinterpret_cast<const VkPipelineMultisampleStateCreateInfo *>(hopefully_undereferencable_pointer),
reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer),
reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer),
nullptr, // dynamic states
pipeline_layout,
m_renderPass,
0, // subpass
VK_NULL_HANDLE,
0};
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline);
m_errorMonitor->VerifyNotFound();
vkDestroyPipeline(m_device->handle(), pipeline, nullptr);
}
const VkPipelineMultisampleStateCreateInfo pipeline_multisample_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
VK_SAMPLE_COUNT_1_BIT,
VK_FALSE, // sample shading
0.0f, // minSampleShading
nullptr, // pSampleMask
VK_FALSE, // alphaToCoverageEnable
VK_FALSE // alphaToOneEnable
};
// try enabled rasterizer but no subpass attachments
{
m_errorMonitor->ExpectSuccess();
VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info =
pipeline_rasterization_state_create_info_template;
pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE;
VkViewport viewport = {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f};
VkRect2D scissor = {{0, 0}, {static_cast<uint32_t>(m_width), static_cast<uint32_t>(m_height)}};
const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
1,
&viewport,
1,
&scissor};
VkRenderPass render_pass;
{
VkSubpassDescription subpass_desc = {};
VkRenderPassCreateInfo render_pass_create_info{
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
nullptr, // pNext
0, // flags
0,
nullptr, // attachments
1,
&subpass_desc,
0,
nullptr // subpass dependencies
};
VkResult err = vkCreateRenderPass(m_device->handle(), &render_pass_create_info, nullptr, &render_pass);
ASSERT_VK_SUCCESS(err);
}
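// The subpass above has no color or depth/stencil attachments, so the depth/stencil
// and color blend state pointers must be ignored even though rasterization is enabled.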
VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
nullptr, // pNext
0, // flags
1, // stageCount
&vs.GetStageCreateInfo(),
&pipeline_vertex_input_state_create_info,
&pipeline_input_assembly_state_create_info,
nullptr,
&pipeline_viewport_state_create_info,
&pipeline_rasterization_state_create_info,
&pipeline_multisample_state_create_info,
reinterpret_cast<const VkPipelineDepthStencilStateCreateInfo *>(hopefully_undereferencable_pointer),
reinterpret_cast<const VkPipelineColorBlendStateCreateInfo *>(hopefully_undereferencable_pointer),
nullptr, // dynamic states
pipeline_layout,
render_pass,
0, // subpass
VK_NULL_HANDLE,
0};
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline);
m_errorMonitor->VerifyNotFound();
vkDestroyPipeline(m_device->handle(), pipeline, nullptr);
vkDestroyRenderPass(m_device->handle(), render_pass, nullptr);
}
// try dynamic viewport and scissor
{
m_errorMonitor->ExpectSuccess();
VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info =
pipeline_rasterization_state_create_info_template;
pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_FALSE;
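// With VK_DYNAMIC_STATE_VIEWPORT and VK_DYNAMIC_STATE_SCISSOR enabled, the pViewports
// and pScissors members of the viewport state must be ignored, so garbage pointers are
// used for them here.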
const VkPipelineViewportStateCreateInfo pipeline_viewport_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
1,
reinterpret_cast<const VkViewport *>(hopefully_undereferencable_pointer),
1,
reinterpret_cast<const VkRect2D *>(hopefully_undereferencable_pointer)};
const VkPipelineDepthStencilStateCreateInfo pipeline_depth_stencil_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
};
const VkPipelineColorBlendAttachmentState pipeline_color_blend_attachment_state = {};
const VkPipelineColorBlendStateCreateInfo pipeline_color_blend_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
VK_FALSE,
VK_LOGIC_OP_CLEAR,
1,
&pipeline_color_blend_attachment_state,
{0.0f, 0.0f, 0.0f, 0.0f}};
const VkDynamicState dynamic_states[2] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR};
const VkPipelineDynamicStateCreateInfo pipeline_dynamic_state_create_info{
VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
nullptr, // pNext
0, // flags
2, dynamic_states};
VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
nullptr, // pNext
0, // flags
1, // stageCount
&vs.GetStageCreateInfo(),
&pipeline_vertex_input_state_create_info,
&pipeline_input_assembly_state_create_info,
nullptr,
&pipeline_viewport_state_create_info,
&pipeline_rasterization_state_create_info,
&pipeline_multisample_state_create_info,
&pipeline_depth_stencil_state_create_info,
&pipeline_color_blend_state_create_info,
&pipeline_dynamic_state_create_info, // dynamic states
pipeline_layout,
m_renderPass,
0, // subpass
VK_NULL_HANDLE,
0};
VkPipeline pipeline;
vkCreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline);
m_errorMonitor->VerifyNotFound();
vkDestroyPipeline(m_device->handle(), pipeline, nullptr);
}
vkDestroyPipelineLayout(m_device->handle(), pipeline_layout, nullptr);
}
TEST_F(VkPositiveLayerTest, ExternalMemory) {
TEST_DESCRIPTION("Perform a copy through a pair of buffers linked by external memory");
#ifdef _WIN32
const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR;
#else
const auto ext_mem_extension_name = VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME;
const auto handle_type = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
#endif
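// Flow: allocate exportable memory for one buffer, export it to a platform handle
// (Win32 handle or POSIX fd), import that handle into a second allocation bound to
// another buffer, then copy data through the shared memory and submit.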
// Check for external memory instance extensions
std::vector<const char *> reqd_instance_extensions = {
{VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME}};
for (auto extension_name : reqd_instance_extensions) {
if (InstanceExtensionSupported(extension_name)) {
m_instance_extension_names.push_back(extension_name);
} else {
printf("%s Required instance extension %s not supported, skipping test\n", kSkipPrefix, extension_name);
return;
}
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for import/export capability
VkPhysicalDeviceExternalBufferInfoKHR ebi = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR, nullptr, 0,
VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, handle_type};
VkExternalBufferPropertiesKHR ebp = {VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR, nullptr, {0, 0, 0}};
auto vkGetPhysicalDeviceExternalBufferPropertiesKHR = (PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)vkGetInstanceProcAddr(
instance(), "vkGetPhysicalDeviceExternalBufferPropertiesKHR");
ASSERT_TRUE(vkGetPhysicalDeviceExternalBufferPropertiesKHR != nullptr);
vkGetPhysicalDeviceExternalBufferPropertiesKHR(gpu(), &ebi, &ebp);
if (!(ebp.externalMemoryProperties.compatibleHandleTypes & handle_type) ||
!(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR) ||
!(ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR)) {
printf("%s External buffer does not support importing and exporting, skipping test\n", kSkipPrefix);
return;
}
// Check if dedicated allocation is required
bool dedicated_allocation =
ebp.externalMemoryProperties.externalMemoryFeatures & VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR;
if (dedicated_allocation) {
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
} else {
printf("%s Dedicated allocation extension not supported, skipping test\n", kSkipPrefix);
return;
}
}
// Check for external memory device extensions
if (DeviceExtensionSupported(gpu(), nullptr, ext_mem_extension_name)) {
m_device_extension_names.push_back(ext_mem_extension_name);
m_device_extension_names.push_back(VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME);
} else {
printf("%s External memory extension not supported, skipping test\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT);
VkMemoryPropertyFlags mem_flags = 0;
const VkDeviceSize buffer_size = 1024;
// Create export and import buffers
const VkExternalMemoryBufferCreateInfoKHR external_buffer_info = {VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR,
nullptr, handle_type};
auto buffer_info = VkBufferObj::create_info(buffer_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT);
buffer_info.pNext = &external_buffer_info;
VkBufferObj buffer_export;
buffer_export.init_no_mem(*m_device, buffer_info);
VkBufferObj buffer_import;
buffer_import.init_no_mem(*m_device, buffer_info);
// Allocation info
auto alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_export.memory_requirements(), mem_flags);
// Add export allocation info to pNext chain
VkExportMemoryAllocateInfoKHR export_info = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR, nullptr, handle_type};
alloc_info.pNext = &export_info;
// Add dedicated allocation info to pNext chain if required
VkMemoryDedicatedAllocateInfoKHR dedicated_info = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR, nullptr,
VK_NULL_HANDLE, buffer_export.handle()};
if (dedicated_allocation) {
export_info.pNext = &dedicated_info;
}
// Allocate memory to be exported
vk_testing::DeviceMemory memory_export;
memory_export.init(*m_device, alloc_info);
// Bind exported memory
buffer_export.bind_memory(memory_export, 0);
#ifdef _WIN32
// Export memory to handle
auto vkGetMemoryWin32HandleKHR = (PFN_vkGetMemoryWin32HandleKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryWin32HandleKHR");
ASSERT_TRUE(vkGetMemoryWin32HandleKHR != nullptr);
VkMemoryGetWin32HandleInfoKHR mghi = {VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR, nullptr, memory_export.handle(),
handle_type};
HANDLE handle;
ASSERT_VK_SUCCESS(vkGetMemoryWin32HandleKHR(m_device->device(), &mghi, &handle));
VkImportMemoryWin32HandleInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR, nullptr, handle_type,
handle};
#else
// Export memory to fd
auto vkGetMemoryFdKHR = (PFN_vkGetMemoryFdKHR)vkGetInstanceProcAddr(instance(), "vkGetMemoryFdKHR");
ASSERT_TRUE(vkGetMemoryFdKHR != nullptr);
VkMemoryGetFdInfoKHR mgfi = {VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR, nullptr, memory_export.handle(), handle_type};
int fd;
ASSERT_VK_SUCCESS(vkGetMemoryFdKHR(m_device->device(), &mgfi, &fd));
VkImportMemoryFdInfoKHR import_info = {VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR, nullptr, handle_type, fd};
#endif
// Import memory
alloc_info = vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_import.memory_requirements(), mem_flags);
alloc_info.pNext = &import_info;
vk_testing::DeviceMemory memory_import;
memory_import.init(*m_device, alloc_info);
// Bind imported memory
buffer_import.bind_memory(memory_import, 0);
// Create test buffers and fill input buffer
VkMemoryPropertyFlags mem_prop = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
VkBufferObj buffer_input;
buffer_input.init_as_src_and_dst(*m_device, buffer_size, mem_prop);
auto input_mem = (uint8_t *)buffer_input.memory().map();
for (uint32_t i = 0; i < buffer_size; i++) {
input_mem[i] = (i & 0xFF);
}
buffer_input.memory().unmap();
VkBufferObj buffer_output;
buffer_output.init_as_src_and_dst(*m_device, buffer_size, mem_prop);
// Copy from input buffer to output buffer through the exported/imported memory
m_commandBuffer->begin();
VkBufferCopy copy_info = {0, 0, buffer_size};
vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_input.handle(), buffer_export.handle(), 1, &copy_info);
// Insert memory barrier to guarantee copy order
VkMemoryBarrier mem_barrier = {VK_STRUCTURE_TYPE_MEMORY_BARRIER, nullptr, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_TRANSFER_READ_BIT};
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 1,
&mem_barrier, 0, nullptr, 0, nullptr);
vkCmdCopyBuffer(m_commandBuffer->handle(), buffer_import.handle(), buffer_output.handle(), 1, &copy_info);
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateRenderPass) {
TEST_DESCRIPTION("Verify error messages for supported and unsupported sample counts in render pass attachments.");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess();
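// With VK_AMD_mixed_attachment_samples, color attachments may use a sample count that
// is lower than (or equal to) the depth/stencil attachment's count, but not higher.
// The 1x color / 4x depth combination below is therefore valid.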
std::vector<VkAttachmentDescription> attachments;
{
VkAttachmentDescription att = {};
att.format = VK_FORMAT_R8G8B8A8_UNORM;
att.samples = VK_SAMPLE_COUNT_1_BIT;
att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
att.finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments.push_back(att);
att.format = VK_FORMAT_D16_UNORM;
att.samples = VK_SAMPLE_COUNT_4_BIT;
att.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
att.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
att.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
att.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE;
att.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
att.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachments.push_back(att);
}
VkAttachmentReference color_ref = {};
color_ref.attachment = 0;
color_ref.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
VkAttachmentReference depth_ref = {};
depth_ref.attachment = 1;
depth_ref.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &color_ref;
subpass.pDepthStencilAttachment = &depth_ref;
VkRenderPassCreateInfo rp_info = {};
rp_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
rp_info.attachmentCount = attachments.size();
rp_info.pAttachments = attachments.data();
rp_info.subpassCount = 1;
rp_info.pSubpasses = &subpass;
vkCreateRenderPass(device(), &rp_info, NULL, &m_renderPass);
m_errorMonitor->VerifyNotFound();
// Expect an error message for invalid sample counts
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkSubpassDescription-pColorAttachments-01506");
attachments[0].samples = VK_SAMPLE_COUNT_4_BIT;
attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
{
VkRenderPass render_pass;
VkResult err = vkCreateRenderPass(device(), &rp_info, NULL, &render_pass);
m_errorMonitor->VerifyFound();
ASSERT_NE(err, VK_SUCCESS);
}
}
TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateGraphicsPipeline) {
TEST_DESCRIPTION("Verify an error message for an incorrect graphics pipeline rasterization sample count.");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME);
} else {
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkRenderpassObj render_pass(m_device);
const VkPipelineLayoutObj pipeline_layout(m_device);
VkShaderObj vs(m_device, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, this);
VkShaderObj fs(m_device, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, this);
// Set a mismatched sample count
VkPipelineMultisampleStateCreateInfo ms_state_ci = {};
ms_state_ci.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
VkPipelineObj pipe(m_device);
pipe.AddShader(&vs);
pipe.AddShader(&fs);
pipe.AddDefaultColorAttachment();
pipe.SetMSAA(&ms_state_ci);
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkGraphicsPipelineCreateInfo-subpass-01505");
pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
m_errorMonitor->VerifyFound();
}
TEST_F(VkPositiveLayerTest, ParameterLayerFeatures2Capture) {
TEST_DESCRIPTION("Ensure parameter_validation_layer correctly captures physical device features");
if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
} else {
printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
(PFN_vkGetPhysicalDeviceFeatures2KHR)vkGetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
VkResult err;
m_errorMonitor->ExpectSuccess();
VkPhysicalDeviceFeatures2KHR features2;
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR;
features2.pNext = nullptr;
vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
// We're not creating a valid m_device, but the phy wrapper is useful
vk_testing::PhysicalDevice physical_device(gpu());
vk_testing::QueueCreateInfoArray queue_info(physical_device.queue_properties());
// Only request creation with queuefamilies that have at least one queue
std::vector<VkDeviceQueueCreateInfo> create_queue_infos;
auto qci = queue_info.data();
for (uint32_t i = 0; i < queue_info.size(); ++i) {
if (qci[i].queueCount) {
create_queue_infos.push_back(qci[i]);
}
}
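// Chain the queried features through pNext (leaving pEnabledFeatures null) so the
// layers must capture the enabled features from VkPhysicalDeviceFeatures2KHR.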
VkDeviceCreateInfo dev_info = {};
dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
dev_info.pNext = &features2;
dev_info.flags = 0;
dev_info.queueCreateInfoCount = create_queue_infos.size();
dev_info.pQueueCreateInfos = create_queue_infos.data();
dev_info.enabledLayerCount = 0;
dev_info.ppEnabledLayerNames = nullptr;
dev_info.enabledExtensionCount = 0;
dev_info.ppEnabledExtensionNames = nullptr;
dev_info.pEnabledFeatures = nullptr;
VkDevice device;
err = vkCreateDevice(gpu(), &dev_info, nullptr, &device);
ASSERT_VK_SUCCESS(err);
if (features2.features.samplerAnisotropy) {
// Test that the parameter layer is caching the features correctly using CreateSampler
VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
// If the features were not captured correctly, this should cause an error
sampler_ci.anisotropyEnable = VK_TRUE;
sampler_ci.maxAnisotropy = physical_device.properties().limits.maxSamplerAnisotropy;
VkSampler sampler = VK_NULL_HANDLE;
err = vkCreateSampler(device, &sampler_ci, nullptr, &sampler);
ASSERT_VK_SUCCESS(err);
vkDestroySampler(device, sampler, nullptr);
} else {
printf("%s Feature samplerAnisotropy not enabled; parameter_layer check skipped.\n", kSkipPrefix);
}
// Verify the core validation layer has captured the physical device features by creating a query pool.
if (features2.features.pipelineStatisticsQuery) {
VkQueryPool query_pool;
VkQueryPoolCreateInfo qpci{};
qpci.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
qpci.queryType = VK_QUERY_TYPE_PIPELINE_STATISTICS;
qpci.queryCount = 1;
err = vkCreateQueryPool(device, &qpci, nullptr, &query_pool);
ASSERT_VK_SUCCESS(err);
vkDestroyQueryPool(device, query_pool, nullptr);
} else {
printf("%s Feature pipelineStatisticsQuery not enabled; core_validation_layer check skipped.\n", kSkipPrefix);
}
vkDestroyDevice(device, nullptr);
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, GetMemoryRequirements2) {
TEST_DESCRIPTION(
"Get memory requirements with VK_KHR_get_memory_requirements2 instead of core entry points and verify layers do not emit "
"errors when objects are bound and used");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for the VK_KHR_get_memory_requirements2 extension
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
} else {
printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT);
// Create a test buffer
VkBufferObj buffer;
buffer.init_no_mem(*m_device,
VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT));
// Use extension to get buffer memory requirements
auto vkGetBufferMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetBufferMemoryRequirements2KHR>(
vkGetDeviceProcAddr(m_device->device(), "vkGetBufferMemoryRequirements2KHR"));
ASSERT_TRUE(vkGetBufferMemoryRequirements2KHR != nullptr);
VkBufferMemoryRequirementsInfo2KHR buffer_info = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr,
buffer.handle()};
VkMemoryRequirements2KHR buffer_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR};
vkGetBufferMemoryRequirements2KHR(m_device->device(), &buffer_info, &buffer_reqs);
// Allocate and bind buffer memory
vk_testing::DeviceMemory buffer_memory;
buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer_reqs.memoryRequirements, 0));
vkBindBufferMemory(m_device->device(), buffer.handle(), buffer_memory.handle(), 0);
// Create a test image
auto image_ci = vk_testing::Image::create_info();
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.extent.width = 32;
image_ci.extent.height = 32;
image_ci.format = VK_FORMAT_R8G8B8A8_UNORM;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image image;
image.init_no_mem(*m_device, image_ci);
// Use extension to get image memory requirements
auto vkGetImageMemoryRequirements2KHR = reinterpret_cast<PFN_vkGetImageMemoryRequirements2KHR>(
vkGetDeviceProcAddr(m_device->device(), "vkGetImageMemoryRequirements2KHR"));
ASSERT_TRUE(vkGetImageMemoryRequirements2KHR != nullptr);
VkImageMemoryRequirementsInfo2KHR image_info = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR, nullptr,
image.handle()};
VkMemoryRequirements2KHR image_reqs = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR};
vkGetImageMemoryRequirements2KHR(m_device->device(), &image_info, &image_reqs);
// Allocate and bind image memory
vk_testing::DeviceMemory image_memory;
image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image_reqs.memoryRequirements, 0));
vkBindImageMemory(m_device->device(), image.handle(), image_memory.handle(), 0);
// Now execute arbitrary commands that use the test buffer and image
m_commandBuffer->begin();
// Fill buffer with 0
vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0);
// Transition and clear image
const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT);
const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_GENERAL, subresource_range);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
const VkClearColorValue color = {};
vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range);
// Submit and verify no validation errors
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, BindMemory2) {
TEST_DESCRIPTION(
"Bind memory with VK_KHR_bind_memory2 instead of core entry points and verify layers do not emit errors when objects are "
"used");
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
// Check for the VK_KHR_bind_memory2 extension
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME)) {
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
} else {
printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
m_errorMonitor->ExpectSuccess(VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT);
// Create a test buffer
VkBufferObj buffer;
buffer.init_no_mem(*m_device, VkBufferObj::create_info(1024, VK_BUFFER_USAGE_TRANSFER_DST_BIT));
// Allocate buffer memory
vk_testing::DeviceMemory buffer_memory;
buffer_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, buffer.memory_requirements(), 0));
// Bind buffer memory with extension
auto vkBindBufferMemory2KHR =
reinterpret_cast<PFN_vkBindBufferMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindBufferMemory2KHR"));
ASSERT_TRUE(vkBindBufferMemory2KHR != nullptr);
VkBindBufferMemoryInfoKHR buffer_bind_info = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR, nullptr, buffer.handle(),
buffer_memory.handle(), 0};
vkBindBufferMemory2KHR(m_device->device(), 1, &buffer_bind_info);
// Create a test image
auto image_ci = vk_testing::Image::create_info();
image_ci.imageType = VK_IMAGE_TYPE_2D;
image_ci.extent.width = 32;
image_ci.extent.height = 32;
image_ci.format = VK_FORMAT_R8G8B8A8_UNORM;
image_ci.tiling = VK_IMAGE_TILING_OPTIMAL;
image_ci.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
vk_testing::Image image;
image.init_no_mem(*m_device, image_ci);
// Allocate image memory
vk_testing::DeviceMemory image_memory;
image_memory.init(*m_device, vk_testing::DeviceMemory::get_resource_alloc_info(*m_device, image.memory_requirements(), 0));
// Bind image memory with extension
auto vkBindImageMemory2KHR =
reinterpret_cast<PFN_vkBindImageMemory2KHR>(vkGetDeviceProcAddr(m_device->device(), "vkBindImageMemory2KHR"));
ASSERT_TRUE(vkBindImageMemory2KHR != nullptr);
VkBindImageMemoryInfoKHR image_bind_info = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR, nullptr, image.handle(),
image_memory.handle(), 0};
vkBindImageMemory2KHR(m_device->device(), 1, &image_bind_info);
// Now execute arbitrary commands that use the test buffer and image
m_commandBuffer->begin();
// Fill buffer with 0
vkCmdFillBuffer(m_commandBuffer->handle(), buffer.handle(), 0, VK_WHOLE_SIZE, 0);
// Transition and clear image
const auto subresource_range = image.subresource_range(VK_IMAGE_ASPECT_COLOR_BIT);
const auto barrier = image.image_memory_barrier(0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_GENERAL, subresource_range);
vkCmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0,
nullptr, 0, nullptr, 1, &barrier);
const VkClearColorValue color = {};
vkCmdClearColorImage(m_commandBuffer->handle(), image.handle(), VK_IMAGE_LAYOUT_GENERAL, &color, 1, &subresource_range);
// Submit and verify no validation errors
m_commandBuffer->end();
m_commandBuffer->QueueCommandBuffer();
m_errorMonitor->VerifyNotFound();
}
TEST_F(VkPositiveLayerTest, MultiplaneImageTests) {
TEST_DESCRIPTION("Positive test of multiplane image operations");
// Enable KHR multiplane req'd extensions
bool mp_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION);
if (mp_extensions) {
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
}
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE1_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
mp_extensions = mp_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
if (mp_extensions) {
m_device_extension_names.push_back(VK_KHR_MAINTENANCE1_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_BIND_MEMORY_2_EXTENSION_NAME);
m_device_extension_names.push_back(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME);
} else {
printf("%s test requires KHR multiplane extensions, not available. Skipping.\n", kSkipPrefix);
return;
}
ASSERT_NO_FATAL_FAILURE(InitState());
VkImageCreateInfo ci = {};
ci.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
ci.pNext = NULL;
ci.flags = 0;
ci.imageType = VK_IMAGE_TYPE_2D;
ci.format = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR;
ci.tiling = VK_IMAGE_TILING_OPTIMAL;
ci.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
ci.extent = {128, 128, 1};
ci.mipLevels = 1;
ci.arrayLayers = 1;
ci.samples = VK_SAMPLE_COUNT_1_BIT;
ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
ci.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Verify format
VkFormatFeatureFlags features = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT | VK_FORMAT_FEATURE_TRANSFER_DST_BIT;
bool supported = ImageFormatAndFeaturesSupported(instance(), gpu(), ci, features);
if (!supported) {
printf("%s Multiplane image format not supported. Skipping test.\n", kSkipPrefix);
return; // Assume there's low ROI on searching for different mp formats
}
VkImage image;
ASSERT_VK_SUCCESS(vkCreateImage(device(), &ci, NULL, &image));
// Allocate & bind memory
VkPhysicalDeviceMemoryProperties phys_mem_props;
vkGetPhysicalDeviceMemoryProperties(gpu(), &phys_mem_props);
VkMemoryRequirements mem_reqs;
vkGetImageMemoryRequirements(device(), image, &mem_reqs);
VkDeviceMemory mem_obj = VK_NULL_HANDLE;
VkMemoryPropertyFlagBits mem_props = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
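// Find a device-local memory type that satisfies the image's memory requirements.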
for (uint32_t type = 0; type < phys_mem_props.memoryTypeCount; type++) {
if ((mem_reqs.memoryTypeBits & (1 << type)) &&
((phys_mem_props.memoryTypes[type].propertyFlags & mem_props) == mem_props)) {
VkMemoryAllocateInfo alloc_info = {};
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.allocationSize = mem_reqs.size;
alloc_info.memoryTypeIndex = type;
ASSERT_VK_SUCCESS(vkAllocateMemory(device(), &alloc_info, NULL, &mem_obj));
break;
}
}
if (VK_NULL_HANDLE == mem_obj) {
printf("%s Unable to allocate image memory. Skipping test.\n", kSkipPrefix);
vkDestroyImage(device(), image, NULL);
return;
}
ASSERT_VK_SUCCESS(vkBindImageMemory(device(), image, mem_obj, 0));
// Copy plane 0 to plane 2
VkImageCopy copyRegion = {};
copyRegion.srcSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_0_BIT_KHR;
copyRegion.srcSubresource.mipLevel = 0;
copyRegion.srcSubresource.baseArrayLayer = 0;
copyRegion.srcSubresource.layerCount = 1;
copyRegion.srcOffset = {0, 0, 0};
copyRegion.dstSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
copyRegion.dstSubresource.mipLevel = 0;
copyRegion.dstSubresource.baseArrayLayer = 0;
copyRegion.dstSubresource.layerCount = 1;
copyRegion.dstOffset = {0, 0, 0};
copyRegion.extent.width = 128;
copyRegion.extent.height = 128;
copyRegion.extent.depth = 1;
m_errorMonitor->ExpectSuccess();
m_commandBuffer->begin();
m_commandBuffer->CopyImage(image, VK_IMAGE_LAYOUT_GENERAL, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copyRegion);
m_commandBuffer->end();
m_errorMonitor->VerifyNotFound();
#if 0
// Copy to/from buffer
VkBufferCreateInfo bci = {};
bci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bci.pNext = NULL;
bci.size = 128 * 128 * 3;
bci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
bci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VkBuffer buffer;
ASSERT_VK_SUCCESS(vkCreateBuffer(device(), &bci, NULL, &buffer));
VkBufferImageCopy copy_region = {};
copy_region.bufferRowLength = 128;
copy_region.bufferImageHeight = 128;
copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_1_BIT_KHR;
copy_region.imageSubresource.layerCount = 1;
copy_region.imageExtent.height = 64;
copy_region.imageExtent.width = 64;
copy_region.imageExtent.depth = 1;
m_errorMonitor->ExpectSuccess();
vkCmdCopyImageToBuffer(m_commandBuffer->handle(), image, VK_IMAGE_LAYOUT_GENERAL, buffer, 1, &copy_region);
m_errorMonitor->VerifyNotFound();
m_errorMonitor->ExpectSuccess();
copy_region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_PLANE_2_BIT_KHR;
vkCmdCopyBufferToImage(m_commandBuffer->handle(), buffer, image, VK_IMAGE_LAYOUT_GENERAL, 1, &copy_region);
m_errorMonitor->VerifyNotFound();
#endif
vkFreeMemory(device(), mem_obj, NULL);
vkDestroyImage(device(), image, NULL);
}
TEST_F(VkPositiveLayerTest, ApiVersionZero) {
TEST_DESCRIPTION("Check that apiVersion = 0 is valid.");
m_errorMonitor->ExpectSuccess();
app_info.apiVersion = 0U;
ASSERT_NO_FATAL_FAILURE(InitFramework(myDbgFunc, m_errorMonitor));
m_errorMonitor->VerifyNotFound();
}
#if defined(ANDROID) && defined(VALIDATION_APK)
const char *appTag = "VulkanLayerValidationTests";
static bool initialized = false;
static bool active = false;
// Convert Intents to argv
// Ported from Hologram sample, only difference is flexible key
std::vector<std::string> get_args(android_app &app, const char *intent_extra_data_key) {
std::vector<std::string> args;
JavaVM &vm = *app.activity->vm;
JNIEnv *p_env;
if (vm.AttachCurrentThread(&p_env, nullptr) != JNI_OK) return args;
JNIEnv &env = *p_env;
jobject activity = app.activity->clazz;
jmethodID get_intent_method = env.GetMethodID(env.GetObjectClass(activity), "getIntent", "()Landroid/content/Intent;");
jobject intent = env.CallObjectMethod(activity, get_intent_method);
jmethodID get_string_extra_method =
env.GetMethodID(env.GetObjectClass(intent), "getStringExtra", "(Ljava/lang/String;)Ljava/lang/String;");
jvalue get_string_extra_args;
get_string_extra_args.l = env.NewStringUTF(intent_extra_data_key);
jstring extra_str = static_cast<jstring>(env.CallObjectMethodA(intent, get_string_extra_method, &get_string_extra_args));
std::string args_str;
if (extra_str) {
const char *extra_utf = env.GetStringUTFChars(extra_str, nullptr);
args_str = extra_utf;
env.ReleaseStringUTFChars(extra_str, extra_utf);
env.DeleteLocalRef(extra_str);
}
env.DeleteLocalRef(get_string_extra_args.l);
env.DeleteLocalRef(intent);
vm.DetachCurrentThread();
// split args_str
std::stringstream ss(args_str);
std::string arg;
while (std::getline(ss, arg, ' ')) {
if (!arg.empty()) args.push_back(arg);
}
return args;
}
void addFullTestCommentIfPresent(const ::testing::TestInfo &test_info, std::string &error_message) {
const char *const type_param = test_info.type_param();
const char *const value_param = test_info.value_param();
if (type_param != NULL || value_param != NULL) {
error_message.append(", where ");
if (type_param != NULL) {
error_message.append("TypeParam = ").append(type_param);
if (value_param != NULL) error_message.append(" and ");
}
if (value_param != NULL) {
error_message.append("GetParam() = ").append(value_param);
}
}
}
// Inspired by https://github.com/google/googletest/blob/master/googletest/docs/AdvancedGuide.md
class LogcatPrinter : public ::testing::EmptyTestEventListener {
// Called before a test starts.
virtual void OnTestStart(const ::testing::TestInfo &test_info) {
__android_log_print(ANDROID_LOG_INFO, appTag, "[ RUN ] %s.%s", test_info.test_case_name(), test_info.name());
}
// Called after a failed assertion or a SUCCEED() invocation.
virtual void OnTestPartResult(const ::testing::TestPartResult &result) {
// If the test part succeeded, we don't need to do anything.
if (result.type() == ::testing::TestPartResult::kSuccess) return;
__android_log_print(ANDROID_LOG_INFO, appTag, "%s in %s:%d %s", result.failed() ? "*** Failure" : "Success",
result.file_name(), result.line_number(), result.summary());
}
// Called after a test ends.
virtual void OnTestEnd(const ::testing::TestInfo &info) {
std::string result;
if (info.result()->Passed()) {
result.append("[ OK ]");
} else {
result.append("[ FAILED ]");
}
result.append(info.test_case_name()).append(".").append(info.name());
if (info.result()->Failed()) addFullTestCommentIfPresent(info, result);
if (::testing::GTEST_FLAG(print_time)) {
std::ostringstream os;
os << info.result()->elapsed_time();
result.append(" (").append(os.str()).append(" ms)");
}
__android_log_print(ANDROID_LOG_INFO, appTag, "%s", result.c_str());
}
};
static int32_t processInput(struct android_app *app, AInputEvent *event) { return 0; }
static void processCommand(struct android_app *app, int32_t cmd) {
switch (cmd) {
case APP_CMD_INIT_WINDOW: {
if (app->window) {
initialized = true;
}
break;
}
case APP_CMD_GAINED_FOCUS: {
active = true;
break;
}
case APP_CMD_LOST_FOCUS: {
active = false;
break;
}
}
}
void android_main(struct android_app *app) {
int vulkanSupport = InitVulkan();
if (vulkanSupport == 0) {
__android_log_print(ANDROID_LOG_INFO, appTag, "==== FAILED ==== No Vulkan support found");
return;
}
app->onAppCmd = processCommand;
app->onInputEvent = processInput;
while (1) {
int events;
struct android_poll_source *source;
while (ALooper_pollAll(active ? 0 : -1, NULL, &events, (void **)&source) >= 0) {
if (source) {
source->process(app, source);
}
if (app->destroyRequested != 0) {
VkTestFramework::Finish();
return;
}
}
if (initialized && active) {
// Use the following key to send arguments to gtest, e.g.
// --es args "--gtest_filter=-VkLayerTest.foo"
const char key[] = "args";
std::vector<std::string> args = get_args(*app, key);
std::string filter = "";
if (args.size() > 0) {
__android_log_print(ANDROID_LOG_INFO, appTag, "Intent args = %s", args[0].c_str());
filter += args[0];
} else {
__android_log_print(ANDROID_LOG_INFO, appTag, "No Intent args detected");
}
int argc = 2;
char *argv[] = {(char *)"foo", (char *)filter.c_str()};
__android_log_print(ANDROID_LOG_DEBUG, appTag, "filter = %s", argv[1]);
// Route output to files until we can override the gtest output
freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/out.txt", "w", stdout);
freopen("/sdcard/Android/data/com.example.VulkanLayerValidationTests/files/err.txt", "w", stderr);
::testing::InitGoogleTest(&argc, argv);
::testing::TestEventListeners &listeners = ::testing::UnitTest::GetInstance()->listeners();
listeners.Append(new LogcatPrinter);
VkTestFramework::InitArgs(&argc, argv);
::testing::AddGlobalTestEnvironment(new TestEnvironment);
int result = RUN_ALL_TESTS();
if (result != 0) {
__android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests FAILED ====");
} else {
__android_log_print(ANDROID_LOG_INFO, appTag, "==== Tests PASSED ====");
}
VkTestFramework::Finish();
fclose(stdout);
fclose(stderr);
ANativeActivity_finish(app->activity);
return;
}
}
}
#endif
#if defined(_WIN32) && !defined(NDEBUG)
#include <crtdbg.h>
#endif
int main(int argc, char **argv) {
int result;
#ifdef ANDROID
int vulkanSupport = InitVulkan();
if (vulkanSupport == 0) return 1;
#endif
#if defined(_WIN32) && !defined(NDEBUG)
_CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE);
_CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
#endif
::testing::InitGoogleTest(&argc, argv);
VkTestFramework::InitArgs(&argc, argv);
::testing::AddGlobalTestEnvironment(new TestEnvironment);
result = RUN_ALL_TESTS();
VkTestFramework::Finish();
return result;
}