layers: Convert CV to use layer chassis
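
This moves core_validation onto the generated layer chassis: the CMake target now builds chassis.cpp and layer_chassis_dispatch.cpp in place of core_dispatch.cpp, buffer_validation.cpp picks up chassis.h, and the former free validation/record helpers become methods on the CoreChecks object, so the core_validation:: namespace accessors and the per-call GetReportData() fetches can be dropped in favor of member state.

A rough, self-contained sketch of the mechanical pattern, using made-up names (ValidateExtent, prefix) that are illustration only and not code from this patch:

    #include <cstdio>

    // Hypothetical stand-in for the real report-data object.
    struct debug_report_data { const char *prefix; };

    // Before: a free function fetched this state through a namespace-scope
    // accessor such as core_validation::GetReportData(device_data).
    // After: the same routine is a method on the per-device validation object
    // the chassis instantiates, so report_data is simply a member.
    class CoreChecks {
      public:
        debug_report_data *report_data = nullptr;
        bool ValidateExtent(int width) const {
            if (width > 0) return false;
            std::printf("%s: width must be positive\n", report_data->prefix);
            return true;  // true == a validation error was reported ("skip")
        }
    };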
Change-Id: I990db304e096f832eee9a7cc8fb810490afdcb04
diff --git a/layers/CMakeLists.txt b/layers/CMakeLists.txt
index e1122c3..eb3dbe3 100644
--- a/layers/CMakeLists.txt
+++ b/layers/CMakeLists.txt
@@ -192,7 +192,7 @@
# Inter-layer dependencies are temporarily necessary to serialize layer builds which avoids contention for common generated files
if(BUILD_LAYERS)
- AddVkLayer(core_validation "" core_dispatch.cpp core_validation.cpp convert_to_renderpass2.cpp descriptor_sets.cpp buffer_validation.cpp shader_validation.cpp gpu_validation.cpp xxhash.c)
+ AddVkLayer(core_validation "BUILD_CORE_VALIDATION" chassis.cpp layer_chassis_dispatch.cpp core_validation.cpp convert_to_renderpass2.cpp descriptor_sets.cpp buffer_validation.cpp shader_validation.cpp gpu_validation.cpp xxhash.c)
add_dependencies(VkLayer_core_validation VulkanVL_generate_chassis_files)
AddVkLayer(object_lifetimes "BUILD_OBJECT_TRACKER" object_tracker.cpp object_tracker.h object_tracker_utils.cpp chassis.cpp layer_chassis_dispatch.cpp)
add_dependencies(VkLayer_object_lifetimes VkLayer_core_validation)
diff --git a/layers/buffer_validation.cpp b/layers/buffer_validation.cpp
index 05102a9..6c84e96 100644
--- a/layers/buffer_validation.cpp
+++ b/layers/buffer_validation.cpp
@@ -34,19 +34,12 @@
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
+#include "chassis.h"
#include "core_validation.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
-namespace core_validation {
-extern unordered_map<void *, layer_data *> layer_data_map;
-extern unordered_map<void *, instance_layer_data *> instance_layer_data_map;
-}; // namespace core_validation
-
-using core_validation::instance_layer_data_map;
-using core_validation::layer_data_map;
-
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
return 1u + (uint32_t)log2(std::max({height, width, depth}));
@@ -56,7 +49,8 @@
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
-void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
+void CoreChecks::SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair,
+ const VkImageLayout &layout) {
auto it = pCB->imageLayoutMap.find(imgpair);
if (it != pCB->imageLayoutMap.end()) {
it->second.layout = layout;
@@ -69,8 +63,10 @@
SetLayout(device_data, pCB, imgpair, {node.initialLayout, layout});
}
}
+
template <class OBJECT, class LAYOUT>
-void SetLayout(layer_data *device_data, OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
+void CoreChecks::SetLayout(layer_data *device_data, OBJECT *pObject, VkImage image, VkImageSubresource range,
+ const LAYOUT &layout) {
ImageSubresourcePair imgpair = {image, true, range};
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
SetLayout(device_data, pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
@@ -84,8 +80,8 @@
}
template <class OBJECT, class LAYOUT>
-void SetLayout(layer_data *device_data, OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout,
- VkImageAspectFlags aspectMask) {
+void CoreChecks::SetLayout(layer_data *device_data, OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout,
+ VkImageAspectFlags aspectMask) {
if (imgpair.subresource.aspectMask & aspectMask) {
imgpair.subresource.aspectMask = aspectMask;
SetLayout(device_data, pObject, imgpair, layout);
@@ -93,8 +89,8 @@
}
// Set the layout in supplied map
-void SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
- VkImageLayout layout) {
+void CoreChecks::SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
+ ImageSubresourcePair imgpair, VkImageLayout layout) {
auto it = imageLayoutMap.find(imgpair);
if (it != imageLayoutMap.end()) {
it->second.layout = layout; // Update
@@ -103,10 +99,8 @@
}
}
-bool FindLayoutVerifyNode(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, ImageSubresourcePair imgpair,
- IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
-
+bool CoreChecks::FindLayoutVerifyNode(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, ImageSubresourcePair imgpair,
+ IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
@@ -135,16 +129,15 @@
return true;
}
-bool FindLayoutVerifyLayout(layer_data const *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
- const VkImageAspectFlags aspectMask) {
+bool CoreChecks::FindLayoutVerifyLayout(layer_data const *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
+ const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
imgpair.subresource.aspectMask = aspectMask;
- auto imgsubIt = (*core_validation::GetImageLayoutMap(device_data)).find(imgpair);
- if (imgsubIt == (*core_validation::GetImageLayoutMap(device_data)).end()) {
+ auto imgsubIt = (*GetImageLayoutMap(device_data)).find(imgpair);
+ if (imgsubIt == (*GetImageLayoutMap(device_data)).end()) {
return false;
}
if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
@@ -159,8 +152,8 @@
}
// Find layout(s) on the command buffer level
-bool FindCmdBufLayout(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, VkImage image, VkImageSubresource range,
- IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+bool CoreChecks::FindCmdBufLayout(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, VkImage image, VkImageSubresource range,
+ IMAGE_CMD_BUF_LAYOUT_NODE &node) {
ImageSubresourcePair imgpair = {image, true, range};
node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
FindLayoutVerifyNode(device_data, pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
@@ -183,7 +176,7 @@
}
// Find layout(s) on the global level
-bool FindGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
+bool CoreChecks::FindGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayoutVerifyLayout(device_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
@@ -196,16 +189,16 @@
}
if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
imgpair = {imgpair.image, false, VkImageSubresource()};
- auto imgsubIt = (*core_validation::GetImageLayoutMap(device_data)).find(imgpair);
- if (imgsubIt == (*core_validation::GetImageLayoutMap(device_data)).end()) return false;
+ auto imgsubIt = (*GetImageLayoutMap(device_data)).find(imgpair);
+ if (imgsubIt == (*GetImageLayoutMap(device_data)).end()) return false;
layout = imgsubIt->second.layout;
}
return true;
}
-bool FindLayouts(layer_data *device_data, VkImage image, std::vector<VkImageLayout> &layouts) {
- auto sub_data = (*core_validation::GetImageSubresourceMap(device_data)).find(image);
- if (sub_data == (*core_validation::GetImageSubresourceMap(device_data)).end()) return false;
+bool CoreChecks::FindLayouts(layer_data *device_data, VkImage image, std::vector<VkImageLayout> &layouts) {
+ auto sub_data = (*GetImageSubresourceMap(device_data)).find(image);
+ if (sub_data == (*GetImageSubresourceMap(device_data)).end()) return false;
auto image_state = GetImageState(device_data, image);
if (!image_state) return false;
bool ignoreGlobal = false;
@@ -215,15 +208,16 @@
}
for (auto imgsubpair : sub_data->second) {
if (ignoreGlobal && !imgsubpair.hasSubresource) continue;
- auto img_data = (*core_validation::GetImageLayoutMap(device_data)).find(imgsubpair);
- if (img_data != (*core_validation::GetImageLayoutMap(device_data)).end()) {
+ auto img_data = (*GetImageLayoutMap(device_data)).find(imgsubpair);
+ if (img_data != (*GetImageLayoutMap(device_data)).end()) {
layouts.push_back(img_data->second.layout);
}
}
return true;
}
-bool FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
- VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
+
+bool CoreChecks::FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
+ ImageSubresourcePair imgpair, VkImageLayout &layout, const VkImageAspectFlags aspectMask) {
if (!(imgpair.subresource.aspectMask & aspectMask)) {
return false;
}
@@ -237,8 +231,9 @@
}
// find layout in supplied map
-bool FindLayout(layer_data *device_data, const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
- ImageSubresourcePair imgpair, VkImageLayout &layout) {
+bool CoreChecks::FindLayout(layer_data *device_data,
+ const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
+ ImageSubresourcePair imgpair, VkImageLayout &layout) {
layout = VK_IMAGE_LAYOUT_MAX_ENUM;
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
FindLayout(imageLayoutMap, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
@@ -260,16 +255,16 @@
}
// Set the layout on the global level
-void SetGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
+void CoreChecks::SetGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
VkImage &image = imgpair.image;
- auto &lmap = (*core_validation::GetImageLayoutMap(device_data));
+ auto &lmap = (*GetImageLayoutMap(device_data));
auto data = lmap.find(imgpair);
if (data != lmap.end()) {
data->second.layout = layout; // Update
} else {
lmap[imgpair].layout = layout; // Insert
}
- auto &image_subresources = (*core_validation::GetImageSubresourceMap(device_data))[image];
+ auto &image_subresources = (*GetImageSubresourceMap(device_data))[image];
auto subresource = std::find(image_subresources.begin(), image_subresources.end(), imgpair);
if (subresource == image_subresources.end()) {
image_subresources.push_back(imgpair);
@@ -277,7 +272,8 @@
}
// Set the layout on the cmdbuf level
-void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
+void CoreChecks::SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair,
+ const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
auto it = pCB->imageLayoutMap.find(imgpair);
if (it != pCB->imageLayoutMap.end()) {
it->second = node; // Update
@@ -286,8 +282,8 @@
}
}
// Set image layout for given VkImageSubresourceRange struct
-void SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
- VkImageSubresourceRange image_subresource_range, const VkImageLayout &layout) {
+void CoreChecks::SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
+ VkImageSubresourceRange image_subresource_range, const VkImageLayout &layout) {
assert(image_state);
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
for (uint32_t level_index = 0; level_index < image_subresource_range.levelCount; ++level_index) {
@@ -319,8 +315,8 @@
}
}
// Set image layout for given VkImageSubresourceLayers struct
-void SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
- VkImageSubresourceLayers image_subresource_layers, const VkImageLayout &layout) {
+void CoreChecks::SetImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *image_state,
+ VkImageSubresourceLayers image_subresource_layers, const VkImageLayout &layout) {
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
VkImageSubresourceRange image_subresource_range;
image_subresource_range.aspectMask = image_subresource_layers.aspectMask;
@@ -332,8 +328,8 @@
}
// Set image layout for all slices of an image view
-void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state,
- const VkImageLayout &layout) {
+void CoreChecks::SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state,
+ const VkImageLayout &layout) {
assert(view_state);
IMAGE_STATE *image_state = GetImageState(device_data, view_state->create_info.image);
@@ -349,17 +345,18 @@
SetImageLayout(device_data, cb_node, image_state, sub_range, layout);
}
-void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImageView imageView, const VkImageLayout &layout) {
+void CoreChecks::SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImageView imageView,
+ const VkImageLayout &layout) {
auto view_state = GetImageViewState(device_data, imageView);
SetImageViewLayout(device_data, cb_node, view_state, layout);
}
-bool ValidateRenderPassLayoutAgainstFramebufferImageUsage(layer_data *device_data, RenderPassCreateVersion rp_version,
- VkImageLayout layout, VkImage image, VkImageView image_view,
- VkFramebuffer framebuffer, VkRenderPass renderpass,
- uint32_t attachment_index, const char *variable_name) {
+bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(layer_data *device_data, RenderPassCreateVersion rp_version,
+ VkImageLayout layout, VkImage image, VkImageView image_view,
+ VkFramebuffer framebuffer, VkRenderPass renderpass,
+ uint32_t attachment_index, const char *variable_name) {
bool skip = false;
- const auto report_data = core_validation::GetReportData(device_data);
+ const auto report_data = GetReportData(device_data);
auto image_state = GetImageState(device_data, image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
@@ -455,13 +452,12 @@
return skip;
}
-bool VerifyFramebufferAndRenderPassLayouts(layer_data *device_data, RenderPassCreateVersion rp_version, GLOBAL_CB_NODE *pCB,
- const VkRenderPassBeginInfo *pRenderPassBegin,
- const FRAMEBUFFER_STATE *framebuffer_state) {
+bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(layer_data *device_data, RenderPassCreateVersion rp_version,
+ GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
+ const FRAMEBUFFER_STATE *framebuffer_state) {
bool skip = false;
auto const pRenderPassInfo = GetRenderPassState(device_data, pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebufferInfo = framebuffer_state->createInfo;
- const auto report_data = core_validation::GetReportData(device_data);
auto render_pass = GetRenderPassState(device_data, pRenderPassBegin->renderPass)->renderPass;
auto framebuffer = framebuffer_state->framebuffer;
@@ -574,8 +570,8 @@
return skip;
}
-void TransitionAttachmentRefLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
- const safe_VkAttachmentReference2KHR &ref) {
+void CoreChecks::TransitionAttachmentRefLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
+ const safe_VkAttachmentReference2KHR &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = GetAttachmentImageViewState(device_data, pFramebuffer, ref.attachment);
if (image_view) {
@@ -584,8 +580,8 @@
}
}
-void TransitionSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB, const RENDER_PASS_STATE *render_pass_state,
- const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
+void CoreChecks::TransitionSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB, const RENDER_PASS_STATE *render_pass_state,
+ const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
@@ -602,8 +598,9 @@
}
}
-bool ValidateImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE const *pCB, const VkImageMemoryBarrier *mem_barrier,
- uint32_t level, uint32_t layer, VkImageAspectFlags aspect) {
+bool CoreChecks::ValidateImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE const *pCB,
+ const VkImageMemoryBarrier *mem_barrier, uint32_t level, uint32_t layer,
+ VkImageAspectFlags aspect) {
if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
return false;
}
@@ -617,11 +614,10 @@
// TODO: Set memory invalid which is in mem_tracker currently
} else if (node.layout != mem_barrier->oldLayout) {
skip = log_msg(
- core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(pCB->commandBuffer),
- "VUID-VkImageMemoryBarrier-oldLayout-01197",
+ report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ HandleToUint64(pCB->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"For image %s you cannot transition the layout of aspect=%d level=%d layer=%d from %s when current layout is %s.",
- core_validation::GetReportData(device_data)->FormatHandle(mem_barrier->image).c_str(), aspect, level, layer,
+ report_data->FormatHandle(mem_barrier->image).c_str(), aspect, level, layer,
string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
}
return skip;
@@ -630,8 +626,9 @@
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
-void TransitionBeginRenderPassLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, const RENDER_PASS_STATE *render_pass_state,
- FRAMEBUFFER_STATE *framebuffer_state) {
+void CoreChecks::TransitionBeginRenderPassLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
+ const RENDER_PASS_STATE *render_pass_state,
+ FRAMEBUFFER_STATE *framebuffer_state) {
// First transition into initialLayout
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
@@ -644,8 +641,9 @@
TransitionSubpassLayouts(device_data, cb_state, render_pass_state, 0, framebuffer_state);
}
-void TransitionImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
- uint32_t level, uint32_t layer, VkImageAspectFlags aspect_mask, VkImageAspectFlags aspect) {
+void CoreChecks::TransitionImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
+ uint32_t level, uint32_t layer, VkImageAspectFlags aspect_mask,
+ VkImageAspectFlags aspect) {
if (!(aspect_mask & aspect)) {
return;
}
@@ -681,9 +679,8 @@
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
-bool ValidateBarrierLayoutToImageUsage(layer_data *device_data, const VkImageMemoryBarrier *img_barrier, bool new_not_old,
- VkImageUsageFlags usage_flags, const char *func_name) {
- const auto report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateBarrierLayoutToImageUsage(layer_data *device_data, const VkImageMemoryBarrier *img_barrier,
+ bool new_not_old, VkImageUsageFlags usage_flags, const char *func_name) {
bool skip = false;
const VkImageLayout layout = (new_not_old) ? img_barrier->newLayout : img_barrier->oldLayout;
const char *msg_code = kVUIDUndefined; // sentinel value meaning "no error"
@@ -750,8 +747,8 @@
using ImageBarrierScoreboardImageMap = std::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Verify image barriers are compatible with the images they reference.
-bool ValidateBarriersToImages(layer_data *device_data, GLOBAL_CB_NODE const *cb_state, uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) {
+bool CoreChecks::ValidateBarriersToImages(layer_data *device_data, GLOBAL_CB_NODE const *cb_state, uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name) {
bool skip = false;
// Scoreboard for duplicate layout transition barriers within the list
@@ -777,14 +774,12 @@
(img_barrier->oldLayout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier->subresourceRange;
skip = log_msg(
- core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, HandleToUint64(cb_state->commandBuffer),
- "VUID-VkImageMemoryBarrier-oldLayout-01197",
+ report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
+ HandleToUint64(cb_state->commandBuffer), "VUID-VkImageMemoryBarrier-oldLayout-01197",
"%s: pImageMemoryBarrier[%u] conflicts with earlier entry pImageMemoryBarrier[%u]. Image %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
- func_name, i, entry.index,
- core_validation::GetReportData(device_data)->FormatHandle(img_barrier->image).c_str(), range.aspectMask,
+ func_name, i, entry.index, report_data->FormatHandle(img_barrier->image).c_str(), range.aspectMask,
range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier->oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
@@ -807,12 +802,12 @@
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= log_msg(
- core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image), 0,
+ report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ HandleToUint64(img_barrier->image), 0,
"Attempting to transition shared presentable image %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
- core_validation::GetReportData(device_data)->FormatHandle(img_barrier->image).c_str(),
- string_VkImageLayout(img_barrier->oldLayout), string_VkImageLayout(img_barrier->newLayout));
+ report_data->FormatHandle(img_barrier->image).c_str(), string_VkImageLayout(img_barrier->oldLayout),
+ string_VkImageLayout(img_barrier->newLayout));
}
}
@@ -822,14 +817,13 @@
auto const aspect_mask = img_barrier->subresourceRange.aspectMask;
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
- skip |= log_msg(core_validation::GetReportData(device_data), VK_DEBUG_REPORT_ERROR_BIT_EXT,
- VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, HandleToUint64(img_barrier->image),
- "VUID-VkImageMemoryBarrier-image-01207",
+ skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
+ HandleToUint64(img_barrier->image), "VUID-VkImageMemoryBarrier-image-01207",
"%s: Image barrier 0x%p references image %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
func_name, static_cast<const void *>(img_barrier),
- core_validation::GetReportData(device_data)->FormatHandle(img_barrier->image).c_str(),
- string_VkFormat(image_create_info->format), aspect_mask);
+ report_data->FormatHandle(img_barrier->image).c_str(), string_VkFormat(image_create_info->format),
+ aspect_mask);
}
}
uint32_t level_count = ResolveRemainingLevels(&img_barrier->subresourceRange, image_create_info->mipLevels);
@@ -857,19 +851,18 @@
return skip;
}
-static bool IsReleaseOp(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkImageMemoryBarrier const *barrier) {
+bool CoreChecks::IsReleaseOp(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkImageMemoryBarrier const *barrier) {
if (!IsTransferOp(barrier)) return false;
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
- return pool && IsReleaseOp<VkImageMemoryBarrier, true>(pool, barrier);
+ return pool && TempIsReleaseOp<VkImageMemoryBarrier, true>(pool, barrier);
}
template <typename Barrier>
-bool ValidateQFOTransferBarrierUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
- uint32_t barrier_count, const Barrier *barriers) {
+bool CoreChecks::ValidateQFOTransferBarrierUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
+ uint32_t barrier_count, const Barrier *barriers) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
bool skip = false;
- const auto report_data = core_validation::GetReportData(device_data);
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename BarrierRecord::Tag());
const char *barrier_name = BarrierRecord::BarrierName();
@@ -878,7 +871,8 @@
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
const BarrierRecord *barrier_record = nullptr;
- if (IsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) && !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
+ if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer */>(pool, &barriers[b]) &&
+ !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barriers[b]);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
@@ -906,12 +900,14 @@
}
template <typename Barrier>
-void RecordQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t barrier_count, const Barrier *barriers) {
+void CoreChecks::RecordQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t barrier_count,
+ const Barrier *barriers) {
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
auto &barrier_sets = GetQFOBarrierSets(cb_state, typename QFOTransferBarrier<Barrier>::Tag());
for (uint32_t b = 0; b < barrier_count; b++) {
if (!IsTransferOp(&barriers[b])) continue;
- if (IsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barriers[b]) && !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
+ if (TempIsReleaseOp<Barrier, true /* Assume IsTransfer*/>(pool, &barriers[b]) &&
+ !IsSpecial(barriers[b].dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barriers[b]);
} else if (IsAcquireOp<Barrier, true /*Assume IsTransfer */>(pool, &barriers[b]) &&
!IsSpecial(barriers[b].srcQueueFamilyIndex)) {
@@ -920,25 +916,26 @@
}
}
-bool ValidateBarriersQFOTransferUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
- uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
- uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers) {
+bool CoreChecks::ValidateBarriersQFOTransferUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
+ uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
+ uint32_t imageMemBarrierCount,
+ const VkImageMemoryBarrier *pImageMemBarriers) {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(device_data, func_name, cb_state, bufferBarrierCount, pBufferMemBarriers);
skip |= ValidateQFOTransferBarrierUniqueness(device_data, func_name, cb_state, imageMemBarrierCount, pImageMemBarriers);
return skip;
}
-void RecordBarriersQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t bufferBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
- const VkImageMemoryBarrier *pImageMemBarriers) {
+void CoreChecks::RecordBarriersQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t bufferBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
+ const VkImageMemoryBarrier *pImageMemBarriers) {
RecordQFOTransferBarriers(device_data, cb_state, bufferBarrierCount, pBufferMemBarriers);
RecordQFOTransferBarriers(device_data, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
template <typename BarrierRecord, typename Scoreboard>
-static bool ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const GLOBAL_CB_NODE *cb_state,
- const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) {
+bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const GLOBAL_CB_NODE *cb_state,
+ const char *operation, const BarrierRecord &barrier, Scoreboard *scoreboard) {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->insert(std::make_pair(barrier, cb_state));
@@ -956,15 +953,14 @@
}
template <typename Barrier>
-static bool ValidateQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
- QFOTransferCBScoreboards<Barrier> *scoreboards) {
+bool CoreChecks::ValidateQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
+ QFOTransferCBScoreboards<Barrier> *scoreboards) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
bool skip = false;
- const auto report_data = core_validation::GetReportData(device_data);
+ const auto report_data = device_data->GetReportData(device_data);
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
- const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
- core_validation::GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
+ const GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
const char *barrier_name = BarrierRecord::BarrierName();
const char *handle_name = BarrierRecord::HandleName();
// No release should have an extant duplicate (WARNING)
@@ -1007,9 +1003,9 @@
return skip;
}
-bool ValidateQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
- QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
- QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
+bool CoreChecks::ValidateQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
+ QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
+ QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
bool skip = false;
skip |= ValidateQueuedQFOTransferBarriers<VkImageMemoryBarrier>(device_data, cb_state, qfo_image_scoreboards);
skip |= ValidateQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(device_data, cb_state, qfo_buffer_scoreboards);
@@ -1017,12 +1013,11 @@
}
template <typename Barrier>
-static void RecordQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
+void CoreChecks::RecordQueuedQFOTransferBarriers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
using BarrierRecord = QFOTransferBarrier<Barrier>;
using TypeTag = typename BarrierRecord::Tag;
const auto &cb_barriers = GetQFOBarrierSets(cb_state, TypeTag());
- GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
- core_validation::GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
+ GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers = GetGlobalQFOReleaseBarrierMap(device_data, TypeTag());
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
@@ -1045,28 +1040,18 @@
}
}
-void RecordQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
+void CoreChecks::RecordQueuedQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state) {
RecordQueuedQFOTransferBarriers<VkImageMemoryBarrier>(device_data, cb_state);
RecordQueuedQFOTransferBarriers<VkBufferMemoryBarrier>(device_data, cb_state);
}
-// Remove the pending QFO release records from the global set
-// Note that the type of the handle argument constrained to match Barrier type
-// The defaulted BarrierRecord argument allows use to declare the type once, but is not intended to be specified by the caller
-template <typename Barrier, typename BarrierRecord = QFOTransferBarrier<Barrier>>
-static void EraseQFOReleaseBarriers(layer_data *device_data, const typename BarrierRecord::HandleType &handle) {
- GlobalQFOTransferBarrierMap<Barrier> &global_release_barriers =
- core_validation::GetGlobalQFOReleaseBarrierMap(device_data, typename BarrierRecord::Tag());
- global_release_barriers.erase(handle);
-}
-
// Avoid making the template globally visible by exporting the one instance of it we need.
-void EraseQFOImageRelaseBarriers(layer_data *device_data, const VkImage &image) {
+void CoreChecks::EraseQFOImageRelaseBarriers(layer_data *device_data, const VkImage &image) {
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(device_data, image);
}
-void TransitionImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t memBarrierCount,
- const VkImageMemoryBarrier *pImgMemBarriers) {
+void CoreChecks::TransitionImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t memBarrierCount,
+ const VkImageMemoryBarrier *pImgMemBarriers) {
for (uint32_t i = 0; i < memBarrierCount; ++i) {
auto mem_barrier = &pImgMemBarriers[i];
if (!mem_barrier) continue;
@@ -1133,10 +1118,10 @@
}
}
-bool VerifyImageLayout(layer_data const *device_data, GLOBAL_CB_NODE const *cb_node, IMAGE_STATE *image_state,
- VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout,
- const char *caller, const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) {
- const auto report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::VerifyImageLayout(layer_data const *device_data, GLOBAL_CB_NODE const *cb_node, IMAGE_STATE *image_state,
+ VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout,
+ const char *caller, const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code,
+ bool *error) {
const auto image = image_state->image;
bool skip = false;
@@ -1188,8 +1173,9 @@
return skip;
}
-void TransitionFinalSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
- FRAMEBUFFER_STATE *framebuffer_state) {
+void CoreChecks::TransitionFinalSubpassLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB,
+ const VkRenderPassBeginInfo *pRenderPassBegin,
+ FRAMEBUFFER_STATE *framebuffer_state) {
auto renderPass = GetRenderPassState(device_data, pRenderPassBegin->renderPass);
if (!renderPass) return;
@@ -1211,8 +1197,8 @@
//
// AHB-specific validation within non-AHB APIs
//
-bool ValidateCreateImageANDROID(layer_data *device_data, const debug_report_data *report_data,
- const VkImageCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateImageANDROID(layer_data *device_data, const debug_report_data *report_data,
+ const VkImageCreateInfo *create_info) {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
@@ -1290,7 +1276,7 @@
return skip;
}
-void RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {
+void CoreChecks::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {
const VkExternalMemoryImageCreateInfo *emici = lvl_find_in_chain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
is_node->imported_ahb = true;
@@ -1302,9 +1288,8 @@
}
}
-bool ValidateCreateImageViewANDROID(layer_data *device_data, const VkImageViewCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateImageViewANDROID(layer_data *device_data, const VkImageViewCreateInfo *create_info) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
IMAGE_STATE *image_state = GetImageState(device_data, create_info->image);
if (image_state->has_ahb_format) {
@@ -1350,9 +1335,8 @@
return skip;
}
-bool ValidateGetImageSubresourceLayoutANDROID(layer_data *device_data, const VkImage image) {
+bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(layer_data *device_data, const VkImage image) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
IMAGE_STATE *image_state = GetImageState(device_data, image);
if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
@@ -1367,24 +1351,23 @@
#else
-bool ValidateCreateImageANDROID(layer_data *device_data, const debug_report_data *report_data,
- const VkImageCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateImageANDROID(layer_data *device_data, const debug_report_data *report_data,
+ const VkImageCreateInfo *create_info) {
return false;
}
-void RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {}
+void CoreChecks::RecordCreateImageANDROID(const VkImageCreateInfo *create_info, IMAGE_STATE *is_node) {}
-bool ValidateCreateImageViewANDROID(layer_data *device_data, const VkImageViewCreateInfo *create_info) { return false; }
+bool CoreChecks::ValidateCreateImageViewANDROID(layer_data *device_data, const VkImageViewCreateInfo *create_info) { return false; }
-bool ValidateGetImageSubresourceLayoutANDROID(layer_data *device_data, const VkImage image) { return false; }
+bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(layer_data *device_data, const VkImage image) { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
-bool PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkImage *pImage) {
- layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), core_validation::layer_data_map);
+bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
+ layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
- const debug_report_data *report_data = GetReportData(device_data);
if (GetDeviceExtensions(device_data)->vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(device_data, report_data, pCreateInfo);
@@ -1498,18 +1481,18 @@
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
- skip |= core_validation::ValidateQueueFamilies(
- device_data, pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateImage",
- "pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420",
- "VUID-VkImageCreateInfo-sharingMode-01420", false);
+ skip |=
+ ValidateQueueFamilies(device_data, pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
+ "vkCreateImage", "pCreateInfo->pQueueFamilyIndices", "VUID-VkImageCreateInfo-sharingMode-01420",
+ "VUID-VkImageCreateInfo-sharingMode-01420", false);
}
return skip;
}
-void PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkImage *pImage, VkResult result) {
- layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), core_validation::layer_data_map);
+void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
+ layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
IMAGE_LAYOUT_NODE image_state;
image_state.layout = pCreateInfo->initialLayout;
@@ -1520,54 +1503,51 @@
}
GetImageMap(device_data)->insert(std::make_pair(*pImage, std::unique_ptr<IMAGE_STATE>(is_node)));
ImageSubresourcePair subpair{*pImage, false, VkImageSubresource()};
- (*core_validation::GetImageSubresourceMap(device_data))[*pImage].push_back(subpair);
- (*core_validation::GetImageLayoutMap(device_data))[subpair] = image_state;
+ (*GetImageSubresourceMap(device_data))[*pImage].push_back(subpair);
+ (*GetImageLayoutMap(device_data))[subpair] = image_state;
}
-bool PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
- IMAGE_STATE *image_state = core_validation::GetImageState(device_data, image);
+ IMAGE_STATE *image_state = GetImageState(device_data, image);
const VK_OBJECT obj_struct = {HandleToUint64(image), kVulkanObjectTypeImage};
bool skip = false;
if (image_state) {
- skip |= core_validation::ValidateObjectNotInUse(device_data, image_state, obj_struct, "vkDestroyImage",
- "VUID-vkDestroyImage-image-01000");
+ skip |= ValidateObjectNotInUse(device_data, image_state, obj_struct, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
-void PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!image) return;
- IMAGE_STATE *image_state = core_validation::GetImageState(device_data, image);
+ IMAGE_STATE *image_state = GetImageState(device_data, image);
VK_OBJECT obj_struct = {HandleToUint64(image), kVulkanObjectTypeImage};
- core_validation::InvalidateCommandBuffers(device_data, image_state->cb_bindings, obj_struct);
+ InvalidateCommandBuffers(device_data, image_state->cb_bindings, obj_struct);
// Clean up memory mapping, bindings and range references for image
for (auto mem_binding : image_state->GetBoundMemory()) {
- auto mem_info = core_validation::GetMemObjInfo(device_data, mem_binding);
+ auto mem_info = GetMemObjInfo(device_data, mem_binding);
if (mem_info) {
- core_validation::RemoveImageMemoryRange(obj_struct.handle, mem_info);
+ RemoveImageMemoryRange(obj_struct.handle, mem_info);
}
}
- core_validation::ClearMemoryObjectBindings(device_data, obj_struct.handle, kVulkanObjectTypeImage);
+ ClearMemoryObjectBindings(device_data, obj_struct.handle, kVulkanObjectTypeImage);
EraseQFOReleaseBarriers<VkImageMemoryBarrier>(device_data, image);
// Remove image from imageMap
- core_validation::GetImageMap(device_data)->erase(image);
- std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *imageSubresourceMap =
- core_validation::GetImageSubresourceMap(device_data);
+ GetImageMap(device_data)->erase(image);
+ std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *imageSubresourceMap = GetImageSubresourceMap(device_data);
const auto &sub_entry = imageSubresourceMap->find(image);
if (sub_entry != imageSubresourceMap->end()) {
for (const auto &pair : sub_entry->second) {
- core_validation::GetImageLayoutMap(device_data)->erase(pair);
+ GetImageLayoutMap(device_data)->erase(pair);
}
imageSubresourceMap->erase(sub_entry);
}
}
-bool ValidateImageAttributes(layer_data *device_data, IMAGE_STATE *image_state, VkImageSubresourceRange range) {
+bool CoreChecks::ValidateImageAttributes(layer_data *device_data, IMAGE_STATE *image_state, VkImageSubresourceRange range) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
char const str[] = "vkCmdClearColorImage aspectMasks for all subresource ranges must be set to VK_IMAGE_ASPECT_COLOR_BIT";
@@ -1611,10 +1591,9 @@
return array_layer_count;
}
-bool VerifyClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state,
- VkImageSubresourceRange range, VkImageLayout dest_image_layout, const char *func_name) {
+bool CoreChecks::VerifyClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state,
+ VkImageSubresourceRange range, VkImageLayout dest_image_layout, const char *func_name) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
uint32_t level_count = ResolveRemainingLevels(&range, image_state->createInfo.mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&range, image_state->createInfo.arrayLayers);
@@ -1684,8 +1663,8 @@
return skip;
}
-void RecordClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
- VkImageLayout dest_image_layout) {
+void CoreChecks::RecordClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, VkImage image,
+ VkImageSubresourceRange range, VkImageLayout dest_image_layout) {
VkImageCreateInfo *image_create_info = &(GetImageState(device_data, image)->createInfo);
uint32_t level_count = ResolveRemainingLevels(&range, image_create_info->mipLevels);
uint32_t layer_count = ResolveRemainingLayers(&range, image_create_info->arrayLayers);
@@ -1703,9 +1682,9 @@
}
}
-bool PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue *pColor, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges) {
+bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearColorValue *pColor, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
@@ -1734,8 +1713,9 @@
return skip;
}
-void PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
+void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearColorValue *pColor, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
@@ -1748,12 +1728,11 @@
}
}
-bool PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges) {
+bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
auto cb_node = GetCBNode(device_data, commandBuffer);
@@ -1802,9 +1781,9 @@
return skip;
}
-void PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges) {
+void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange *pRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
@@ -1959,13 +1938,12 @@
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
-static inline VkExtent3D GetScaledItg(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
+VkExtent3D CoreChecks::GetScaledItg(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
if (pPool) {
- granularity =
- GetPhysicalDeviceState(device_data)->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
+ granularity = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
@@ -1986,10 +1964,9 @@
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
-static inline bool CheckItgOffset(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
- const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member,
- const char *vuid) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::CheckItgOffset(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
+ const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member,
+ const char *vuid) {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
@@ -2020,11 +1997,10 @@
}
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
-static inline bool CheckItgExtent(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
- const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
- const VkImageType image_type, const uint32_t i, const char *function, const char *member,
- const char *vuid) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::CheckItgExtent(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
+ const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
+ const VkImageType image_type, const uint32_t i, const char *function, const char *member,
+ const char *vuid) {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
@@ -2079,9 +2055,9 @@
return skip;
}
-bool ValidateImageMipLevel(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
- const uint32_t i, const char *function, const char *member, const char *vuid) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateImageMipLevel(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
+ uint32_t mip_level, const uint32_t i, const char *function, const char *member,
+ const char *vuid) {
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -2092,10 +2068,9 @@
return skip;
}
-bool ValidateImageArrayLayerRange(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
- const uint32_t base_layer, const uint32_t layer_count, const uint32_t i, const char *function,
- const char *member, const char *vuid) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateImageArrayLayerRange(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
+ const uint32_t base_layer, const uint32_t layer_count, const uint32_t i,
+ const char *function, const char *member, const char *vuid) {
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
@@ -2110,9 +2085,9 @@
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
-bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
- const IMAGE_STATE *img, const VkBufferImageCopy *region,
- const uint32_t i, const char *function, const char *vuid) {
+bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
+ const IMAGE_STATE *img, const VkBufferImageCopy *region,
+ const uint32_t i, const char *function, const char *vuid) {
bool skip = false;
VkExtent3D granularity = GetScaledItg(device_data, cb_node, img);
skip |= CheckItgOffset(device_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset", vuid);
@@ -2123,9 +2098,10 @@
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
-bool ValidateCopyImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
- const IMAGE_STATE *src_img, const IMAGE_STATE *dst_img,
- const VkImageCopy *region, const uint32_t i, const char *function) {
+bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
+ const IMAGE_STATE *src_img, const IMAGE_STATE *dst_img,
+ const VkImageCopy *region, const uint32_t i,
+ const char *function) {
bool skip = false;
// Source image checks
VkExtent3D granularity = GetScaledItg(device_data, cb_node, src_img);
@@ -2150,8 +2126,9 @@
}
// Validate contents of a VkImageCopy struct
-bool ValidateImageCopyData(const layer_data *device_data, const debug_report_data *report_data, const uint32_t regionCount,
- const VkImageCopy *ic_regions, const IMAGE_STATE *src_state, const IMAGE_STATE *dst_state) {
+bool CoreChecks::ValidateImageCopyData(const layer_data *device_data, const debug_report_data *report_data,
+ const uint32_t regionCount, const VkImageCopy *ic_regions, const IMAGE_STATE *src_state,
+ const IMAGE_STATE *dst_state) {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
@@ -2403,10 +2380,10 @@
}
// vkCmdCopyImage checks that only apply if the multiplane extension is enabled
-bool CopyImageMultiplaneValidation(const layer_data *dev_data, VkCommandBuffer command_buffer, const IMAGE_STATE *src_image_state,
- const IMAGE_STATE *dst_image_state, const VkImageCopy region) {
+bool CoreChecks::CopyImageMultiplaneValidation(const layer_data *dev_data, VkCommandBuffer command_buffer,
+ const IMAGE_STATE *src_image_state, const IMAGE_STATE *dst_image_state,
+ const VkImageCopy region) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(dev_data);
// Neither image is multiplane
if ((!FormatIsMultiplane(src_image_state->createInfo.format)) && (!FormatIsMultiplane(dst_image_state->createInfo.format))) {
@@ -2473,14 +2450,14 @@
return skip;
}
-bool PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
+bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_image_state = GetImageState(device_data, dstImage);
bool skip = false;
- const debug_report_data *report_data = device_data->report_data;
skip = ValidateImageCopyData(device_data, report_data, regionCount, pRegions, src_image_state, dst_image_state);
@@ -2729,11 +2706,11 @@
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyImage()", "VUID-vkCmdCopyImage-renderpass");
bool hit_error = false;
const char *invalid_src_layout_vuid =
- (src_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ (src_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-srcImageLayout-01917"
: "VUID-vkCmdCopyImage-srcImageLayout-00129";
const char *invalid_dst_layout_vuid =
- (dst_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ (dst_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImage-dstImageLayout-01395"
: "VUID-vkCmdCopyImage-dstImageLayout-00134";
for (uint32_t i = 0; i < regionCount; ++i) {
@@ -2750,8 +2727,9 @@
return skip;
}
-void PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
+void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
@@ -2775,9 +2753,9 @@
return true;
}
-static bool ValidateClearAttachementExtent(const layer_data *device_data, VkCommandBuffer command_buffer, uint32_t attachment_index,
- FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment, const VkRect2D &render_area,
- uint32_t rect_count, const VkClearRect *clear_rects) {
+bool CoreChecks::ValidateClearAttachmentExtent(layer_data *device_data, VkCommandBuffer command_buffer, uint32_t attachment_index,
+ FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment, const VkRect2D &render_area,
+ uint32_t rect_count, const VkClearRect *clear_rects) {
bool skip = false;
const IMAGE_VIEW_STATE *image_view_state = nullptr;
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) {
@@ -2811,12 +2789,12 @@
return skip;
}
-bool PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
- const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects) {
+bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
+ const VkClearAttachment *pAttachments, uint32_t rectCount,
+ const VkClearRect *pRects) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_node = GetCBNode(device_data, commandBuffer);
- const debug_report_data *report_data = device_data->report_data;
bool skip = false;
if (cb_node) {
@@ -2912,8 +2890,8 @@
}
}
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
- skip |= ValidateClearAttachementExtent(device_data, commandBuffer, attachment_index, framebuffer, fb_attachment,
- render_area, rectCount, pRects);
+ skip |= ValidateClearAttachmentExtent(device_data, commandBuffer, attachment_index, framebuffer, fb_attachment,
+ render_area, rectCount, pRects);
} else {
// if a secondary level command buffer inherits the framebuffer from the primary command buffer
// (see VkCommandBufferInheritanceInfo), this validation must be deferred until queue submit time
@@ -2926,11 +2904,12 @@
auto val_fn = [device_data, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
GLOBAL_CB_NODE *prim_cb, VkFramebuffer fb) {
assert(rectCount == clear_rect_copy->size());
- auto framebuffer = GetFramebufferState(device_data, fb);
+ FRAMEBUFFER_STATE *framebuffer = device_data->GetFramebufferState(device_data, fb);
const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
bool skip = false;
- skip = ValidateClearAttachementExtent(device_data, commandBuffer, attachment_index, framebuffer, fb_attachment,
- render_area, rectCount, clear_rect_copy->data());
+ skip =
+ device_data->ValidateClearAttachmentExtent(device_data, commandBuffer, attachment_index, framebuffer,
+ fb_attachment, render_area, rectCount, clear_rect_copy->data());
return skip;
};
cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
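A condensed sketch of the deferral mechanism used above, assuming a std::function-based list like cmd_execute_commands_functions and that <functional>/<vector> are available (the vector name here is illustrative): the lambda owns a shared copy of the clear rects, because pRects is only valid for the duration of this call, and it is re-run once the inherited framebuffer is known.

    using DeferredCheck = std::function<bool(GLOBAL_CB_NODE *, VkFramebuffer)>;
    std::vector<DeferredCheck> deferred_checks;   // stands in for cmd_execute_commands_functions
    deferred_checks.emplace_back(val_fn);         // evaluated later, at vkCmdExecuteCommands time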
@@ -2940,14 +2919,14 @@
return skip;
}
-bool PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
+bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageResolve *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_image_state = GetImageState(device_data, dstImage);
- const debug_report_data *report_data = device_data->report_data;
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
skip |= ValidateMemoryIsBoundToImage(device_data, src_image_state, "vkCmdResolveImage()",
@@ -2963,14 +2942,14 @@
"VUID-vkCmdResolveImage-dstImage-02003");
bool hit_error = false;
- const char *invalid_src_layout_vuid = (src_image_state->shared_presentable &&
- core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
- ? "VUID-vkCmdResolveImage-srcImageLayout-01400"
- : "VUID-vkCmdResolveImage-srcImageLayout-00261";
- const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable &&
- core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
- ? "VUID-vkCmdResolveImage-dstImageLayout-01401"
- : "VUID-vkCmdResolveImage-dstImageLayout-00263";
+ const char *invalid_src_layout_vuid =
+ (src_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ ? "VUID-vkCmdResolveImage-srcImageLayout-01400"
+ : "VUID-vkCmdResolveImage-srcImageLayout-00261";
+ const char *invalid_dst_layout_vuid =
+ (dst_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ ? "VUID-vkCmdResolveImage-dstImageLayout-01401"
+ : "VUID-vkCmdResolveImage-dstImageLayout-00263";
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
@@ -3038,8 +3017,9 @@
return skip;
}
-void PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
+void CoreChecks::PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageResolve *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
@@ -3050,13 +3030,13 @@
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
-bool PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
+bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageBlit *pRegions, VkFilter filter) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_image_state = GetImageState(device_data, dstImage);
- const debug_report_data *report_data = device_data->report_data;
bool skip = false;
if (cb_node) {
@@ -3160,14 +3140,14 @@
} // Depth or Stencil
// Do per-region checks
- const char *invalid_src_layout_vuid = (src_image_state->shared_presentable &&
- core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
- ? "VUID-vkCmdBlitImage-srcImageLayout-01398"
- : "VUID-vkCmdBlitImage-srcImageLayout-00222";
- const char *invalid_dst_layout_vuid = (dst_image_state->shared_presentable &&
- core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
- ? "VUID-vkCmdBlitImage-dstImageLayout-01399"
- : "VUID-vkCmdBlitImage-dstImageLayout-00227";
+ const char *invalid_src_layout_vuid =
+ (src_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ ? "VUID-vkCmdBlitImage-srcImageLayout-01398"
+ : "VUID-vkCmdBlitImage-srcImageLayout-00222";
+ const char *invalid_dst_layout_vuid =
+ (dst_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ ? "VUID-vkCmdBlitImage-dstImageLayout-01399"
+ : "VUID-vkCmdBlitImage-dstImageLayout-00227";
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageBlit rgn = pRegions[i];
bool hit_error = false;
@@ -3371,8 +3351,9 @@
return skip;
}
-void PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
+void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageBlit *pRegions, VkFilter filter) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
@@ -3389,11 +3370,10 @@
}
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
-bool ValidateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB,
- std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const &globalImageLayoutMap,
- std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &overlayLayoutMap) {
+bool CoreChecks::ValidateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB,
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const &globalImageLayoutMap,
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &overlayLayoutMap) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
for (auto cb_image_data : pCB->imageLayoutMap) {
VkImageLayout imageLayout;
@@ -3428,7 +3408,7 @@
return skip;
}
-void UpdateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB) {
+void CoreChecks::UpdateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB) {
for (auto cb_image_data : pCB->imageLayoutMap) {
VkImageLayout imageLayout;
FindGlobalLayout(device_data, cb_image_data.first, imageLayout);
@@ -3439,9 +3419,9 @@
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
-bool ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
- const VkImageLayout first_layout, const uint32_t attachment,
- const VkAttachmentDescription2KHR &attachment_description) {
+bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
+ const VkImageLayout first_layout, const uint32_t attachment,
+ const VkAttachmentDescription2KHR &attachment_description) {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
@@ -3483,9 +3463,8 @@
return skip;
}
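The initial READ_ONLY-versus-CLEAR rule described in the comment above reduces to a small predicate. A sketch assuming only vulkan_core.h types; the layer checks a broader set of read-only layouts (and the render pass 2 stencil load op), so the helper name and layout list here are illustrative:

    static bool ReadOnlyLayoutClearedOnLoad(VkImageLayout first_layout, const VkAttachmentDescription2KHR &desc) {
        const bool read_only = (first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
                               (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
        return read_only && (desc.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR);  // reported as an error
    }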
-bool ValidateLayouts(const core_validation::layer_data *device_data, RenderPassCreateVersion rp_version, VkDevice device,
- const VkRenderPassCreateInfo2KHR *pCreateInfo) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateLayouts(layer_data *device_data, RenderPassCreateVersion rp_version, VkDevice device,
+ const VkRenderPassCreateInfo2KHR *pCreateInfo) {
bool skip = false;
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
@@ -3691,9 +3670,8 @@
}
// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
-bool ValidateMapImageLayouts(core_validation::layer_data *device_data, VkDevice device, DEVICE_MEM_INFO const *mem_info,
- VkDeviceSize offset, VkDeviceSize end_offset) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateMapImageLayouts(layer_data *device_data, VkDevice device, DEVICE_MEM_INFO const *mem_info,
+ VkDeviceSize offset, VkDeviceSize end_offset) {
bool skip = false;
// Iterate over all bound image ranges and verify that for any that overlap the map ranges, the layouts are
// VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
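The rule stated in this comment is a two-value whitelist. A minimal sketch, assuming only vulkan_core.h (the helper name is illustrative, not part of this change):

    static bool LayoutAllowsHostAccess(VkImageLayout layout) {
        // Images overlapping mapped memory must be PREINITIALIZED or GENERAL.
        return layout == VK_IMAGE_LAYOUT_PREINITIALIZED || layout == VK_IMAGE_LAYOUT_GENERAL;
    }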
@@ -3723,10 +3701,9 @@
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
-static bool ValidateUsageFlags(const layer_data *device_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
- VulkanObjectType obj_type, const char *msgCode, char const *func_name, char const *usage_str) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
-
+bool CoreChecks::ValidateUsageFlags(const layer_data *device_data, VkFlags actual, VkFlags desired, VkBool32 strict,
+ uint64_t obj_handle, VulkanObjectType obj_type, const char *msgCode, char const *func_name,
+ char const *usage_str) {
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[obj_type];
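The strict/non-strict semantics described in the comment above boil down to one bit test. A minimal sketch, assuming only the VkFlags/VkBool32 typedefs from vulkan_core.h (the helper name is illustrative):

    static bool UsageIsCorrect(VkFlags actual, VkFlags desired, VkBool32 strict) {
        // strict: every desired bit must be set; non-strict: any overlap is enough.
        return strict ? ((actual & desired) == desired) : ((actual & desired) != 0);
    }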
@@ -3753,16 +3730,15 @@
// Helper function to validate usage flags for images. For given image_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
-bool ValidateImageUsageFlags(layer_data *device_data, IMAGE_STATE const *image_state, VkFlags desired, bool strict,
- const char *msgCode, char const *func_name, char const *usage_string) {
+bool CoreChecks::ValidateImageUsageFlags(layer_data *device_data, IMAGE_STATE const *image_state, VkFlags desired, bool strict,
+ const char *msgCode, char const *func_name, char const *usage_string) {
return ValidateUsageFlags(device_data, image_state->createInfo.usage, desired, strict, HandleToUint64(image_state->image),
kVulkanObjectTypeImage, msgCode, func_name, usage_string);
}
-bool ValidateImageFormatFeatureFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
- char const *func_name, const char *linear_vuid, const char *optimal_vuid) {
+bool CoreChecks::ValidateImageFormatFeatureFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
+ char const *func_name, const char *linear_vuid, const char *optimal_vuid) {
VkFormatProperties format_properties = GetPDFormatProperties(dev_data, image_state->createInfo.format);
- const debug_report_data *report_data = core_validation::GetReportData(dev_data);
bool skip = false;
if (image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if ((format_properties.linearTilingFeatures & desired) != desired) {
@@ -3784,11 +3760,10 @@
return skip;
}
-bool ValidateImageSubresourceLayers(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
- const VkImageSubresourceLayers *subresource_layers, char const *func_name, char const *member,
- uint32_t i) {
+bool CoreChecks::ValidateImageSubresourceLayers(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
+ const VkImageSubresourceLayers *subresource_layers, char const *func_name,
+ char const *member, uint32_t i) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(dev_data);
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -3815,16 +3790,15 @@
// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
-bool ValidateBufferUsageFlags(const layer_data *device_data, BUFFER_STATE const *buffer_state, VkFlags desired, bool strict,
- const char *msgCode, char const *func_name, char const *usage_string) {
+bool CoreChecks::ValidateBufferUsageFlags(const layer_data *device_data, BUFFER_STATE const *buffer_state, VkFlags desired,
+ bool strict, const char *msgCode, char const *func_name, char const *usage_string) {
return ValidateUsageFlags(device_data, buffer_state->createInfo.usage, desired, strict, HandleToUint64(buffer_state->buffer),
kVulkanObjectTypeBuffer, msgCode, func_name, usage_string);
}
-bool ValidateBufferViewRange(const layer_data *device_data, const BUFFER_STATE *buffer_state,
- const VkBufferViewCreateInfo *pCreateInfo, const VkPhysicalDeviceLimits *device_limits) {
+bool CoreChecks::ValidateBufferViewRange(const layer_data *device_data, const BUFFER_STATE *buffer_state,
+ const VkBufferViewCreateInfo *pCreateInfo, const VkPhysicalDeviceLimits *device_limits) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
@@ -3869,10 +3843,9 @@
return skip;
}
-bool ValidateBufferViewBuffer(const layer_data *device_data, const BUFFER_STATE *buffer_state,
- const VkBufferViewCreateInfo *pCreateInfo) {
+bool CoreChecks::ValidateBufferViewBuffer(const layer_data *device_data, const BUFFER_STATE *buffer_state,
+ const VkBufferViewCreateInfo *pCreateInfo) {
bool skip = false;
- const debug_report_data *report_data = GetReportData(device_data);
const VkFormatProperties format_properties = GetPDFormatProperties(device_data, pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
@@ -3891,12 +3864,11 @@
return skip;
}
-bool PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkBuffer *pBuffer) {
+bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
- const debug_report_data *report_data = device_data->report_data;
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
@@ -3952,17 +3924,17 @@
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
- skip |= core_validation::ValidateQueueFamilies(
- device_data, pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices, "vkCreateBuffer",
- "pCreateInfo->pQueueFamilyIndices", "VUID-VkBufferCreateInfo-sharingMode-01419",
- "VUID-VkBufferCreateInfo-sharingMode-01419", false);
+ skip |=
+ ValidateQueueFamilies(device_data, pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
+ "vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices", "VUID-VkBufferCreateInfo-sharingMode-01419",
+ "VUID-VkBufferCreateInfo-sharingMode-01419", false);
}
return skip;
}
-void PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkBuffer *pBuffer, VkResult result) {
+void CoreChecks::PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (result != VK_SUCCESS) return;
// TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
@@ -3970,12 +3942,11 @@
->insert(std::make_pair(*pBuffer, std::unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
}
-bool PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
+bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
- const debug_report_data *report_data = device_data->report_data;
BUFFER_STATE *buffer_state = GetBufferState(device_data, pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
@@ -4014,17 +3985,16 @@
return skip;
}
-void PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkBufferView *pView, VkResult result) {
+void CoreChecks::PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkBufferView *pView, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (result != VK_SUCCESS) return;
(*GetBufferViewMap(device_data))[*pView] = std::unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
}
// For the given format verify that the aspect masks make sense
-bool ValidateImageAspectMask(const layer_data *device_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
- const char *func_name, const char *vuid) {
- const debug_report_data *report_data = device_data->report_data;
+bool CoreChecks::ValidateImageAspectMask(const layer_data *device_data, VkImage image, VkFormat format,
+ VkImageAspectFlags aspect_mask, const char *func_name, const char *vuid) {
bool skip = false;
VkDebugReportObjectTypeEXT objectType = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT;
if (image != VK_NULL_HANDLE) {
@@ -4082,15 +4052,10 @@
return skip;
}
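A compressed sketch of the format-to-aspect rule ValidateImageAspectMask enforces, assuming the format classification helpers from vk_format_utils.h (FormatIsColor, FormatIsDepthOrStencil); the real function reports a distinct VUID per case and also handles multi-planar formats, which this sketch omits:

    static bool AspectMaskMatchesFormat(VkFormat format, VkImageAspectFlags aspect_mask) {
        // Color formats must use exactly the color aspect; depth/stencil formats must stay
        // within the depth/stencil aspects and select at least one of them.
        if (FormatIsColor(format)) return aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT;
        if (FormatIsDepthOrStencil(format))
            return aspect_mask != 0 &&
                   (aspect_mask & ~(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0;
        return false;
    }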
-struct SubresourceRangeErrorCodes {
- const char *base_mip_err, *mip_count_err, *base_layer_err, *layer_count_err;
-};
-
-bool ValidateImageSubresourceRange(const layer_data *device_data, const uint32_t image_mip_count, const uint32_t image_layer_count,
- const VkImageSubresourceRange &subresourceRange, const char *cmd_name, const char *param_name,
- const char *image_layer_count_var_name, const uint64_t image_handle,
- SubresourceRangeErrorCodes errorCodes) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateImageSubresourceRange(const layer_data *device_data, const uint32_t image_mip_count,
+ const uint32_t image_layer_count, const VkImageSubresourceRange &subresourceRange,
+ const char *cmd_name, const char *param_name, const char *image_layer_count_var_name,
+ const uint64_t image_handle, SubresourceRangeErrorCodes errorCodes) {
bool skip = false;
// Validate mip levels
@@ -4151,8 +4116,9 @@
return skip;
}
-bool ValidateCreateImageViewSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- bool is_imageview_2d_type, const VkImageSubresourceRange &subresourceRange) {
+bool CoreChecks::ValidateCreateImageViewSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
+ bool is_imageview_2d_type,
+ const VkImageSubresourceRange &subresourceRange) {
bool is_khr_maintenance1 = GetDeviceExtensions(device_data)->vk_khr_maintenance1;
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR);
@@ -4177,8 +4143,8 @@
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
-bool ValidateCmdClearColorSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *param_name) {
+bool CoreChecks::ValidateCmdClearColorSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
+ const VkImageSubresourceRange &subresourceRange, const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
@@ -4190,8 +4156,8 @@
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
-bool ValidateCmdClearDepthSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *param_name) {
+bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
+ const VkImageSubresourceRange &subresourceRange, const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresourceRangeErrorCodes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
@@ -4203,9 +4169,9 @@
HandleToUint64(image_state->image), subresourceRangeErrorCodes);
}
-bool ValidateImageBarrierSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
- const char *param_name) {
+bool CoreChecks::ValidateImageBarrierSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
+ const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
+ const char *param_name) {
SubresourceRangeErrorCodes subresourceRangeErrorCodes = {};
subresourceRangeErrorCodes.base_mip_err = "VUID-VkImageMemoryBarrier-subresourceRange-01486";
subresourceRangeErrorCodes.mip_count_err = "VUID-VkImageMemoryBarrier-subresourceRange-01724";
@@ -4217,10 +4183,9 @@
subresourceRangeErrorCodes);
}
-bool PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
+bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
- const debug_report_data *report_data = device_data->report_data;
bool skip = false;
IMAGE_STATE *image_state = GetImageState(device_data, pCreateInfo->image);
if (image_state) {
@@ -4454,8 +4419,8 @@
return skip;
}
-void PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkImageView *pView, VkResult result) {
+void CoreChecks::PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkImageView *pView, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (result != VK_SUCCESS) return;
auto image_view_map = GetImageViewMap(device_data);
@@ -4467,8 +4432,8 @@
sub_res_range.layerCount = ResolveRemainingLayers(&sub_res_range, image_state->createInfo.arrayLayers);
}
-bool PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
- const VkBufferCopy *pRegions) {
+bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
@@ -4494,8 +4459,8 @@
return skip;
}
-void PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
- const VkBufferCopy *pRegions) {
+void CoreChecks::PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
+ uint32_t regionCount, const VkBufferCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
@@ -4506,8 +4471,7 @@
AddCommandBufferBindingBuffer(device_data, cb_node, dst_buffer_state);
}
-static bool ValidateIdleBuffer(layer_data *device_data, VkBuffer buffer) {
- const debug_report_data *report_data = core_validation::GetReportData(device_data);
+bool CoreChecks::ValidateIdleBuffer(layer_data *device_data, VkBuffer buffer) {
bool skip = false;
auto buffer_state = GetBufferState(device_data, buffer);
if (!buffer_state) {
@@ -4524,7 +4488,7 @@
return skip;
}
-bool PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
IMAGE_VIEW_STATE *image_view_state = GetImageViewState(device_data, imageView);
VK_OBJECT obj_struct = {HandleToUint64(imageView), kVulkanObjectTypeImageView};
@@ -4537,7 +4501,7 @@
return skip;
}
-void PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
IMAGE_VIEW_STATE *image_view_state = GetImageViewState(device_data, imageView);
if (!image_view_state) return;
@@ -4548,7 +4512,7 @@
(*GetImageViewMap(device_data)).erase(imageView);
}
-bool PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto buffer_state = GetBufferState(device_data, buffer);
@@ -4559,7 +4523,7 @@
return skip;
}
-void PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!buffer) return;
auto buffer_state = GetBufferState(device_data, buffer);
@@ -4569,7 +4533,7 @@
for (auto mem_binding : buffer_state->GetBoundMemory()) {
auto mem_info = GetMemObjInfo(device_data, mem_binding);
if (mem_info) {
- core_validation::RemoveBufferMemoryRange(HandleToUint64(buffer), mem_info);
+ RemoveBufferMemoryRange(HandleToUint64(buffer), mem_info);
}
}
ClearMemoryObjectBindings(device_data, HandleToUint64(buffer), kVulkanObjectTypeBuffer);
@@ -4577,7 +4541,8 @@
GetBufferMap(device_data)->erase(buffer_state->buffer);
}
-bool PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto buffer_view_state = GetBufferViewState(device_data, bufferView);
VK_OBJECT obj_struct = {HandleToUint64(bufferView), kVulkanObjectTypeBufferView};
@@ -4589,7 +4554,7 @@
return skip;
}
-void PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!bufferView) return;
auto buffer_view_state = GetBufferViewState(device_data, bufferView);
@@ -4600,8 +4565,8 @@
GetBufferViewMap(device_data)->erase(bufferView);
}
-bool PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
- uint32_t data) {
+bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize size, uint32_t data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto buffer_state = GetBufferState(device_data, dstBuffer);
@@ -4619,8 +4584,8 @@
return skip;
}
-void PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
- uint32_t data) {
+void CoreChecks::PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize size, uint32_t data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto buffer_state = GetBufferState(device_data, dstBuffer);
@@ -4628,8 +4593,8 @@
AddCommandBufferBindingBuffer(device_data, cb_node, buffer_state);
}
-bool ValidateBufferImageCopyData(const debug_report_data *report_data, uint32_t regionCount, const VkBufferImageCopy *pRegions,
- IMAGE_STATE *image_state, const char *function) {
+bool CoreChecks::ValidateBufferImageCopyData(const debug_report_data *report_data, uint32_t regionCount,
+ const VkBufferImageCopy *pRegions, IMAGE_STATE *image_state, const char *function) {
bool skip = false;
for (uint32_t i = 0; i < regionCount; i++) {
@@ -4910,13 +4875,12 @@
return skip;
}
-bool PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
- const debug_report_data *report_data = device_data->report_data;
bool skip = ValidateBufferImageCopyData(report_data, regionCount, pRegions, src_image_state, "vkCmdCopyImageToBuffer");
@@ -4926,7 +4890,7 @@
// Command pool must support graphics, compute, or transfer operations
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
- VkQueueFlags queue_flags = GetPhysicalDeviceState(device_data)->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
+ VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -4961,7 +4925,7 @@
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyImageToBuffer()", "VUID-vkCmdCopyImageToBuffer-renderpass");
bool hit_error = false;
const char *src_invalid_layout_vuid =
- (src_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ (src_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190";
for (uint32_t i = 0; i < regionCount; ++i) {
@@ -4983,8 +4947,8 @@
return skip;
}
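The command-pool capability test near the top of this function is a single mask check. A minimal sketch, assuming only vulkan_core.h (the helper name is illustrative):

    static bool PoolSupportsCopyWork(VkQueueFlags queue_flags) {
        // vkCmdCopyImageToBuffer must be recorded on a graphics, compute, or transfer queue family.
        return (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT)) != 0;
    }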
-void PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_image_state = GetImageState(device_data, srcImage);
@@ -4999,13 +4963,13 @@
AddCommandBufferBindingBuffer(device_data, cb_node, dst_buffer_state);
}
-bool PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
auto dst_image_state = GetImageState(device_data, dstImage);
- const debug_report_data *report_data = device_data->report_data;
bool skip = ValidateBufferImageCopyData(report_data, regionCount, pRegions, dst_image_state, "vkCmdCopyBufferToImage");
@@ -5014,7 +4978,7 @@
// Command pool must support graphics, compute, or transfer operations
auto pPool = GetCommandPoolNode(device_data, cb_node->createInfo.commandPool);
- VkQueueFlags queue_flags = GetPhysicalDeviceState(device_data)->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
+ VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pPool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_node->createInfo.commandPool), "VUID-vkCmdCopyBufferToImage-commandBuffer-cmdpool",
@@ -5045,7 +5009,7 @@
skip |= InsideRenderPass(device_data, cb_node, "vkCmdCopyBufferToImage()", "VUID-vkCmdCopyBufferToImage-renderpass");
bool hit_error = false;
const char *dst_invalid_layout_vuid =
- (dst_image_state->shared_presentable && core_validation::GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
+ (dst_image_state->shared_presentable && GetDeviceExtensions(device_data)->vk_khr_shared_presentable_image)
? "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181";
for (uint32_t i = 0; i < regionCount; ++i) {
@@ -5067,8 +5031,9 @@
return skip;
}
-void PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
+void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkBufferImageCopy *pRegions) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_node = GetCBNode(device_data, commandBuffer);
auto src_buffer_state = GetBufferState(device_data, srcBuffer);
@@ -5082,8 +5047,8 @@
AddCommandBufferBindingImage(device_data, cb_node, dst_image_state);
}
-bool PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
- VkSubresourceLayout *pLayout) {
+bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
+ VkSubresourceLayout *pLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
const auto report_data = device_data->report_data;
bool skip = false;
diff --git a/layers/buffer_validation.h b/layers/buffer_validation.h
index 0f265b6..d3abb71 100644
--- a/layers/buffer_validation.h
+++ b/layers/buffer_validation.h
@@ -30,265 +30,21 @@
#include <algorithm>
#include <bitset>
-using core_validation::instance_layer_data;
-using core_validation::layer_data;
+class CoreChecks;
+typedef CoreChecks layer_data;
uint32_t FullMipChainLevels(uint32_t height, uint32_t width = 1, uint32_t depth = 1);
uint32_t FullMipChainLevels(VkExtent3D);
uint32_t FullMipChainLevels(VkExtent2D);
-bool PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkImage *pImage);
-
-void PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkImage *pImage, VkResult result);
-
-void PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator);
-
-bool PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator);
-
-bool ValidateImageAttributes(layer_data *device_data, IMAGE_STATE *image_state, VkImageSubresourceRange range);
-
uint32_t ResolveRemainingLevels(const VkImageSubresourceRange *range, uint32_t mip_levels);
uint32_t ResolveRemainingLayers(const VkImageSubresourceRange *range, uint32_t layers);
-bool VerifyClearImageLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state,
- VkImageSubresourceRange range, VkImageLayout dest_image_layout, const char *func_name);
-
-bool VerifyImageLayout(layer_data const *device_data, GLOBAL_CB_NODE const *cb_node, IMAGE_STATE *image_state,
- VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout,
- const char *caller, const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error);
-
-void RecordClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
- VkImageLayout dest_image_layout);
-
-bool PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue *pColor, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges);
-
-void PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue *pColor, uint32_t rangeCount, const VkImageSubresourceRange *pRanges);
-
-bool PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges);
-
-void PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
- const VkImageSubresourceRange *pRanges);
-
-bool FindLayoutVerifyNode(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, ImageSubresourcePair imgpair,
- IMAGE_CMD_BUF_LAYOUT_NODE &node, const VkImageAspectFlags aspectMask);
-
-bool FindLayoutVerifyLayout(layer_data const *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
- const VkImageAspectFlags aspectMask);
-
-bool FindCmdBufLayout(layer_data const *device_data, GLOBAL_CB_NODE const *pCB, VkImage image, VkImageSubresource range,
- IMAGE_CMD_BUF_LAYOUT_NODE &node);
-
-bool FindGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, VkImageLayout &layout);
-
-bool FindLayouts(layer_data *device_data, VkImage image, std::vector<VkImageLayout> &layouts);
-
bool FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout &layout, const VkImageAspectFlags aspectMask);
-bool FindLayout(layer_data *device_data, const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap,
- ImageSubresourcePair imgpair, VkImageLayout &layout);
-
-void SetGlobalLayout(layer_data *device_data, ImageSubresourcePair imgpair, const VkImageLayout &layout);
-
-void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node);
-
-void SetLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout);
-
void SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &imageLayoutMap, ImageSubresourcePair imgpair,
VkImageLayout layout);
-void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout);
-void SetImageViewLayout(layer_data *device_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state,
- const VkImageLayout &layout);
-
-bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, RenderPassCreateVersion rp_version, GLOBAL_CB_NODE *pCB,
- const VkRenderPassBeginInfo *pRenderPassBegin,
- const FRAMEBUFFER_STATE *framebuffer_state);
-
-void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
- const safe_VkAttachmentReference2KHR &ref);
-
-void TransitionSubpassLayouts(layer_data *, GLOBAL_CB_NODE *, const RENDER_PASS_STATE *, const int, FRAMEBUFFER_STATE *);
-
-void TransitionBeginRenderPassLayouts(layer_data *, GLOBAL_CB_NODE *, const RENDER_PASS_STATE *, FRAMEBUFFER_STATE *);
-
-bool ValidateImageAspectLayout(layer_data *device_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
- uint32_t level, uint32_t layer, VkImageAspectFlags aspect);
-
-void TransitionImageAspectLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier, uint32_t level,
- uint32_t layer, VkImageAspectFlags aspect);
-
-bool ValidateBarrierLayoutToImageUsage(layer_data *device_data, const VkImageMemoryBarrier *img_barrier, bool new_not_old,
- VkImageUsageFlags usage, const char *func_name);
-
-bool ValidateBarriersToImages(layer_data *device_data, GLOBAL_CB_NODE const *cb_state, uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier *pImageMemoryBarriers, const char *func_name);
-
-bool ValidateBarriersQFOTransferUniqueness(layer_data *device_data, const char *func_name, GLOBAL_CB_NODE *cb_state,
- uint32_t bufferBarrierCount, const VkBufferMemoryBarrier *pBufferMemBarriers,
- uint32_t imageMemBarrierCount, const VkImageMemoryBarrier *pImageMemBarriers);
-
-void RecordBarriersQFOTransfers(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t bufferBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
- const VkImageMemoryBarrier *pImageMemBarriers);
-
-bool ValidateQueuedQFOTransfers(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
- QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
- QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards);
-
-void RecordQueuedQFOTransfers(layer_data *dev_data, GLOBAL_CB_NODE *pCB);
-void EraseQFOImageRelaseBarriers(layer_data *device_data, const VkImage &image);
-
-void TransitionImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t memBarrierCount,
- const VkImageMemoryBarrier *pImgMemBarriers);
-
-void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
- FRAMEBUFFER_STATE *framebuffer_state);
-
-bool PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions);
-
-bool PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
- const VkClearAttachment *pAttachments, uint32_t rectCount, const VkClearRect *pRects);
-
-bool PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions);
-
-void PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions);
-
-bool PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter);
-
-void PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter);
-
-bool ValidateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB,
- std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const &globalImageLayoutMap,
- std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> &overlayLayoutMap);
-
-void UpdateCmdBufImageLayouts(layer_data *device_data, GLOBAL_CB_NODE *pCB);
-
-bool ValidateMaskBitsFromLayouts(core_validation::layer_data *device_data, VkCommandBuffer cmdBuffer,
- const VkAccessFlags &accessMask, const VkImageLayout &layout, const char *type);
-
-bool ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
- const VkImageLayout first_layout, const uint32_t attachment,
- const VkAttachmentDescription2KHR &attachment_description);
-
-bool ValidateLayouts(const core_validation::layer_data *dev_data, RenderPassCreateVersion rp_version, VkDevice device,
- const VkRenderPassCreateInfo2KHR *pCreateInfo);
-
-bool ValidateMapImageLayouts(core_validation::layer_data *dev_data, VkDevice device, DEVICE_MEM_INFO const *mem_info,
- VkDeviceSize offset, VkDeviceSize end_offset);
-
-bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, bool strict,
- const char *msgCode, char const *func_name, char const *usage_string);
-
-bool ValidateImageFormatFeatureFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
- char const *func_name, const char *linear_vuid, const char *optimal_vuid);
-
-bool ValidateImageSubresourceLayers(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
- const VkImageSubresourceLayers *subresource_layers, char const *func_name, char const *member,
- uint32_t i);
-
-bool ValidateBufferUsageFlags(const layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, bool strict,
- const char *msgCode, char const *func_name, char const *usage_string);
-
-bool PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkBuffer *pBuffer);
-
-void PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkBuffer *pBuffer, VkResult result);
-
-bool PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkBufferView *pView);
-
-void PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkBufferView *pView, VkResult result);
-
-bool ValidateImageAspectMask(const layer_data *device_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
- const char *func_name, const char *vuid = "VUID-VkImageSubresource-aspectMask-parameter");
-
-bool ValidateCreateImageViewSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- bool is_imageview_2d_type, const VkImageSubresourceRange &subresourceRange);
-
-bool ValidateCmdClearColorSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *param_name);
-
-bool ValidateCmdClearDepthSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *param_name);
-
-bool ValidateImageBarrierSubresourceRange(const layer_data *device_data, const IMAGE_STATE *image_state,
- const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
- const char *param_name);
-
-bool PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkImageView *pView);
-
-void PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkImageView *pView, VkResult result);
-
-bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *device_data, const GLOBAL_CB_NODE *cb_node,
- const IMAGE_STATE *img, const VkBufferImageCopy *region,
- const uint32_t i, const char *function, const char *vuid);
-
-bool ValidateImageMipLevel(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
- const uint32_t i, const char *function, const char *member, const char *vuid);
-
-bool ValidateImageArrayLayerRange(layer_data *device_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img,
- const uint32_t base_layer, const uint32_t layer_count, const uint32_t i, const char *function,
- const char *member, const char *vuid);
-
-void PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions);
-
-bool PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
- const VkBufferCopy *pRegions);
-
-void PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
- const VkBufferCopy *pRegions);
-
-bool PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator);
-
-void PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator);
-
-bool PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator);
-
-void PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator);
-
-bool PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator);
-
-void PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator);
-
-bool PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
- uint32_t data);
-
-void PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
- uint32_t data);
-
-bool PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions);
-
-void PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions);
-
-bool PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions);
-
-void PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy *pRegions);
-
-bool PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
- VkSubresourceLayout *pLayout);
-
#endif // CORE_VALIDATION_BUFFER_VALIDATION_H_
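The header change above captures the whole conversion pattern: the free-function prototypes are dropped, their definitions become CoreChecks methods, and layer_data is kept as an alias so existing call sites continue to compile. A condensed, hypothetical sketch of that shape (not the actual chassis header):

    class CoreChecks {
      public:
        // Former free functions become members, e.g.:
        bool PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
                                         const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer);
        // Per-handle state (bufferMap, imageMap, imageViewMap, ...) lives here as members.
    };
    typedef CoreChecks layer_data;  // keeps the old type name working during the transition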
diff --git a/layers/core_validation.cpp b/layers/core_validation.cpp
index 0b3e9c3..5a9774e 100644
--- a/layers/core_validation.cpp
+++ b/layers/core_validation.cpp
@@ -64,20 +64,13 @@
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
+#include "chassis.h"
#include "convert_to_renderpass2.h"
#include "core_validation.h"
#include "buffer_validation.h"
#include "shader_validation.h"
-#include "vk_layer_data.h"
#include "vk_layer_utils.h"
-// This intentionally includes a cpp file
-#include "vk_safe_struct.cpp"
-
-using mutex_t = std::mutex;
-using lock_guard_t = std::lock_guard<mutex_t>;
-using unique_lock_t = std::unique_lock<mutex_t>;
-
// These functions are defined *outside* the core_validation namespace as their type
// is also defined outside that namespace
size_t PipelineLayoutCompatDef::hash() const {
@@ -114,8 +107,6 @@
return true;
}
-namespace core_validation {
-
using std::max;
using std::string;
using std::stringstream;
@@ -130,25 +121,36 @@
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);
-// TODO : Do we need to guard access to layer_data_map w/ lock?
-unordered_map<void *, layer_data *> layer_data_map;
-unordered_map<void *, instance_layer_data *> instance_layer_data_map;
+// Return buffer state ptr for specified buffer or else NULL
+BUFFER_STATE *CoreChecks::GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
+ auto buff_it = dev_data->bufferMap.find(buffer);
+ if (buff_it == dev_data->bufferMap.end()) {
+ return nullptr;
+ }
+ return buff_it->second.get();
+}
-// TODO : This can be much smarter, using separate locks for separate global data
-mutex_t global_lock;
+// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
+IMAGE_VIEW_STATE *CoreChecks::GetImageViewState(layer_data *dev_data, VkImageView image_view) {
+ auto iv_it = dev_data->imageViewMap.find(image_view);
+ if (iv_it == dev_data->imageViewMap.end()) {
+ return nullptr;
+ }
+ return iv_it->second.get();
+}
// Get the global map of pending releases
-GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
+GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
layer_data *dev_data, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag) {
return dev_data->qfo_release_image_barrier_map;
}
-GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
+GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &CoreChecks::GetGlobalQFOReleaseBarrierMap(
layer_data *dev_data, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag) {
return dev_data->qfo_release_buffer_barrier_map;
}
 // Get the image view state for a given framebuffer attachment
-IMAGE_VIEW_STATE *GetAttachmentImageViewState(layer_data *dev_data, FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
+IMAGE_VIEW_STATE *CoreChecks::GetAttachmentImageViewState(layer_data *dev_data, FRAMEBUFFER_STATE *framebuffer, uint32_t index) {
assert(framebuffer && (index < framebuffer->createInfo.attachmentCount));
#ifdef FRAMEBUFFER_ATTACHMENT_STATE_CACHE
return framebuffer->attachments[index].view_state;
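The accessors being converted in this hunk (GetBufferState, GetImageViewState, and the ones that follow) all share one pattern: the per-device map owns the state through a unique_ptr, and lookups hand back a non-owning pointer or nullptr. A self-contained sketch of that pattern, with stand-in handle and state types, for reference:

    // Standalone sketch of the state-map accessor pattern; BufferHandle, BufferState,
    // and DeviceData are stand-ins, not the layer's types.
    #include <memory>
    #include <unordered_map>

    struct BufferState { int create_flags = 0; };
    using BufferHandle = unsigned long long;  // stand-in for a Vulkan handle

    struct DeviceData {
        std::unordered_map<BufferHandle, std::unique_ptr<BufferState>> bufferMap;  // map owns the state
    };

    // Callers get a non-owning pointer, or nullptr when the handle is unknown.
    BufferState *GetBufferState(const DeviceData &dev, BufferHandle buffer) {
        auto it = dev.bufferMap.find(buffer);
        return (it == dev.bufferMap.end()) ? nullptr : it->second.get();
    }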
@@ -158,16 +160,8 @@
#endif
}
-// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
-IMAGE_VIEW_STATE *GetImageViewState(const layer_data *dev_data, VkImageView image_view) {
- auto iv_it = dev_data->imageViewMap.find(image_view);
- if (iv_it == dev_data->imageViewMap.end()) {
- return nullptr;
- }
- return iv_it->second.get();
-}
// Return sampler node ptr for specified sampler or else NULL
-SAMPLER_STATE *GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
+SAMPLER_STATE *CoreChecks::GetSamplerState(const layer_data *dev_data, VkSampler sampler) {
auto sampler_it = dev_data->samplerMap.find(sampler);
if (sampler_it == dev_data->samplerMap.end()) {
return nullptr;
@@ -175,23 +169,15 @@
return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
-IMAGE_STATE *GetImageState(const layer_data *dev_data, VkImage image) {
+IMAGE_STATE *CoreChecks::GetImageState(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->imageMap.find(image);
if (img_it == dev_data->imageMap.end()) {
return nullptr;
}
return img_it->second.get();
}
-// Return buffer state ptr for specified buffer or else NULL
-BUFFER_STATE *GetBufferState(const layer_data *dev_data, VkBuffer buffer) {
- auto buff_it = dev_data->bufferMap.find(buffer);
- if (buff_it == dev_data->bufferMap.end()) {
- return nullptr;
- }
- return buff_it->second.get();
-}
// Return swapchain node for specified swapchain or else NULL
-SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
+SWAPCHAIN_NODE *CoreChecks::GetSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
auto swp_it = dev_data->swapchainMap.find(swapchain);
if (swp_it == dev_data->swapchainMap.end()) {
return nullptr;
@@ -199,7 +185,7 @@
return swp_it->second.get();
}
 // Return buffer view state ptr for specified buffer view or else NULL
-BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
+BUFFER_VIEW_STATE *CoreChecks::GetBufferViewState(const layer_data *dev_data, VkBufferView buffer_view) {
auto bv_it = dev_data->bufferViewMap.find(buffer_view);
if (bv_it == dev_data->bufferViewMap.end()) {
return nullptr;
@@ -207,7 +193,7 @@
return bv_it->second.get();
}
-FENCE_NODE *GetFenceNode(layer_data *dev_data, VkFence fence) {
+FENCE_NODE *CoreChecks::GetFenceNode(layer_data *dev_data, VkFence fence) {
auto it = dev_data->fenceMap.find(fence);
if (it == dev_data->fenceMap.end()) {
return nullptr;
@@ -215,7 +201,7 @@
return &it->second;
}
-EVENT_STATE *GetEventNode(layer_data *dev_data, VkEvent event) {
+EVENT_STATE *CoreChecks::GetEventNode(layer_data *dev_data, VkEvent event) {
auto it = dev_data->eventMap.find(event);
if (it == dev_data->eventMap.end()) {
return nullptr;
@@ -223,7 +209,7 @@
return &it->second;
}
-QUERY_POOL_NODE *GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
+QUERY_POOL_NODE *CoreChecks::GetQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
auto it = dev_data->queryPoolMap.find(query_pool);
if (it == dev_data->queryPoolMap.end()) {
return nullptr;
@@ -231,7 +217,7 @@
return &it->second;
}
-QUEUE_STATE *GetQueueState(layer_data *dev_data, VkQueue queue) {
+QUEUE_STATE *CoreChecks::GetQueueState(layer_data *dev_data, VkQueue queue) {
auto it = dev_data->queueMap.find(queue);
if (it == dev_data->queueMap.end()) {
return nullptr;
@@ -239,7 +225,7 @@
return &it->second;
}
-SEMAPHORE_NODE *GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
+SEMAPHORE_NODE *CoreChecks::GetSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
auto it = dev_data->semaphoreMap.find(semaphore);
if (it == dev_data->semaphoreMap.end()) {
return nullptr;
@@ -247,7 +233,7 @@
return &it->second;
}
-COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
+COMMAND_POOL_NODE *CoreChecks::GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
auto it = dev_data->commandPoolMap.find(pool);
if (it == dev_data->commandPoolMap.end()) {
return nullptr;
@@ -255,42 +241,28 @@
return &it->second;
}
-PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
- auto it = instance_data->physical_device_map.find(phys);
- if (it == instance_data->physical_device_map.end()) {
+PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState(VkPhysicalDevice phys) {
+ auto *phys_dev_map = ((physical_device_map.size() > 0) ? &physical_device_map : &instance_state->physical_device_map);
+ auto it = phys_dev_map->find(phys);
+ if (it == phys_dev_map->end()) {
return nullptr;
}
return &it->second;
}
-PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(const layer_data *device_data) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), instance_layer_data_map);
- auto it = instance_data->physical_device_map.find(device_data->physical_device);
- if (it == instance_data->physical_device_map.end()) {
- return nullptr;
- }
- return &it->second;
-}
+PHYSICAL_DEVICE_STATE *CoreChecks::GetPhysicalDeviceState() { return physical_device_state; }
-PHYSICAL_DEVICE_STATE *GetPhysicalDeviceState(layer_data *device_data) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), instance_layer_data_map);
- auto it = instance_data->physical_device_map.find(device_data->physical_device);
- if (it == instance_data->physical_device_map.end()) {
- return nullptr;
- }
- return &it->second;
-}
-
-SURFACE_STATE *GetSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
- auto it = instance_data->surface_map.find(surface);
- if (it == instance_data->surface_map.end()) {
+SURFACE_STATE *CoreChecks::GetSurfaceState(VkSurfaceKHR surface) {
+ auto *surf_map = ((surface_map.size() > 0) ? &surface_map : &instance_state->surface_map);
+ auto it = surf_map->find(surface);
+ if (it == surf_map->end()) {
return nullptr;
}
return &it->second;
}
// Return ptr to memory binding for given handle of specified type
-static BINDABLE *GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
+BINDABLE *CoreChecks::GetObjectMemBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
switch (type) {
case kVulkanObjectTypeImage:
return GetImageState(dev_data, VkImage(handle));
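GetPhysicalDeviceState and GetSurfaceState above now consult the object's own map first and fall back to instance_state's map when the local one is empty, so a device-level CoreChecks can reach tracking data that lives with the instance-level object. A rough sketch of that fallback, assuming (as the diff implies) that the device-level object holds a pointer to the instance-level one:

    // Rough sketch of the device-to-instance fallback lookup; Tracker and PhysDevState
    // are illustrative stand-ins for the validation object and PHYSICAL_DEVICE_STATE.
    #include <unordered_map>

    struct PhysDevState { unsigned queue_family_count = 0; };

    struct Tracker {
        Tracker() { instance_state = this; }  // assumption: the instance-level object refers to itself;
                                              // the device-level one is repointed at device creation
        Tracker *instance_state;
        std::unordered_map<void *, PhysDevState> physical_device_map;

        PhysDevState *GetPhysicalDeviceState(void *phys) {
            // Prefer this object's own map; an empty map means the data lives with the instance-level object.
            auto *map = physical_device_map.empty() ? &instance_state->physical_device_map : &physical_device_map;
            auto it = map->find(phys);
            return (it == map->end()) ? nullptr : &it->second;
        }
    };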
@@ -302,11 +274,11 @@
return nullptr;
}
-std::unordered_map<VkSamplerYcbcrConversion, uint64_t> *GetYcbcrConversionFormatMap(core_validation::layer_data *device_data) {
+std::unordered_map<VkSamplerYcbcrConversion, uint64_t> *CoreChecks::GetYcbcrConversionFormatMap(layer_data *device_data) {
return &device_data->ycbcr_conversion_ahb_fmt_map;
}
-std::unordered_set<uint64_t> *GetAHBExternalFormatsSet(core_validation::layer_data *device_data) {
+std::unordered_set<uint64_t> *CoreChecks::GetAHBExternalFormatsSet(layer_data *device_data) {
return &device_data->ahb_ext_formats_set;
}
@@ -315,7 +287,7 @@
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
-DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
+DEVICE_MEM_INFO *CoreChecks::GetMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
auto mem_it = dev_data->memObjMap.find(mem);
if (mem_it == dev_data->memObjMap.end()) {
return NULL;
@@ -323,7 +295,8 @@
return mem_it->second.get();
}
-static void AddMemObjInfo(layer_data *dev_data, void *object, const VkDeviceMemory mem, const VkMemoryAllocateInfo *pAllocateInfo) {
+void CoreChecks::AddMemObjInfo(layer_data *dev_data, void *object, const VkDeviceMemory mem,
+ const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
auto *mem_info = new DEVICE_MEM_INFO(object, mem, pAllocateInfo);
@@ -343,13 +316,13 @@
}
// Create binding link between given sampler and command buffer node
-void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
+void CoreChecks::AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
sampler_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(sampler_state->sampler), kVulkanObjectTypeSampler});
}
// Create binding link between given image node and command buffer node
-void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
+void CoreChecks::AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
// Skip validation if this image was created through WSI
if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
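AddCommandBufferBindingSampler and the sibling helpers in this hunk record the link in both directions: the state object remembers which command buffers reference it, and the command buffer remembers which objects it references, so either side can be invalidated when the other goes away. A simplified standalone rendition (object_bindings reduced to bare handles rather than handle/type pairs):

    // Simplified sketch of the two-way binding; names and types are illustrative.
    #include <cstdint>
    #include <unordered_set>

    struct CommandBufferNode;

    struct SamplerStateSketch {
        uint64_t handle = 0;
        std::unordered_set<CommandBufferNode *> cb_bindings;  // command buffers that reference this sampler
    };

    struct CommandBufferNode {
        std::unordered_set<uint64_t> object_bindings;  // handles this command buffer references
    };

    // Record the link in both directions so either side can invalidate the other later.
    void AddSamplerBinding(CommandBufferNode *cb, SamplerStateSketch *sampler) {
        sampler->cb_bindings.insert(cb);
        cb->object_bindings.insert(sampler->handle);
    }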
@@ -368,7 +341,8 @@
}
// Create binding link between given image view node and its image with command buffer node
-void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
+void CoreChecks::AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node,
+ IMAGE_VIEW_STATE *view_state) {
// First add bindings for imageView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(view_state->image_view), kVulkanObjectTypeImageView});
@@ -380,7 +354,7 @@
}
// Create binding link between given buffer node and command buffer node
-void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
+void CoreChecks::AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
// First update CB binding in MemObj mini CB list
for (auto mem_binding : buffer_state->GetBoundMemory()) {
DEVICE_MEM_INFO *pMemInfo = GetMemObjInfo(dev_data, mem_binding);
@@ -396,7 +370,8 @@
}
// Create binding link between given buffer view node and its buffer with command buffer node
-void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
+void CoreChecks::AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node,
+ BUFFER_VIEW_STATE *view_state) {
// First add bindings for bufferView
view_state->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({HandleToUint64(view_state->buffer_view), kVulkanObjectTypeBufferView});
@@ -408,7 +383,7 @@
}
// For every mem obj bound to particular CB, free bindings related to that CB
-static void ClearCmdBufAndMemReferences(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
+void CoreChecks::ClearCmdBufAndMemReferences(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
if (cb_node) {
if (cb_node->memObjs.size() > 0) {
for (auto mem : cb_node->memObjs) {
@@ -423,7 +398,7 @@
}
// Clear a single object binding from given memory object
-static void ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
+void CoreChecks::ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem) {
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(dev_data, mem);
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list
if (mem_info) {
@@ -434,7 +409,7 @@
// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
-void ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
+void CoreChecks::ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type) {
BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
if (mem_binding) {
if (!mem_binding->sparse) {
@@ -467,8 +442,8 @@
}
// Check to see if memory was ever bound to this image
-bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
- const char *error_code) {
+bool CoreChecks::ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
+ const char *error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem, HandleToUint64(image_state->image), api_name, "Image",
@@ -478,8 +453,8 @@
}
// Check to see if memory was bound to this buffer
-bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
- const char *error_code) {
+bool CoreChecks::ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
+ const char *error_code) {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem, HandleToUint64(buffer_state->buffer), api_name,
@@ -490,8 +465,8 @@
// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object.
// Corresponding valid usage checks are in ValidateSetMemBinding().
-static void SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
- uint64_t handle, VulkanObjectType type) {
+void CoreChecks::SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, BINDABLE *mem_binding, VkDeviceSize memory_offset,
+ uint64_t handle, VulkanObjectType type) {
assert(mem_binding);
mem_binding->binding.mem = mem;
mem_binding->UpdateBoundMemorySet(); // force recreation of cached set
@@ -524,8 +499,8 @@
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
-static bool ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
- const char *apiName) {
+bool CoreChecks::ValidateSetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
+ const char *apiName) {
bool skip = false;
// It's an error to bind an object to NULL memory
if (mem != VK_NULL_HANDLE) {
@@ -583,7 +558,7 @@
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return VK_TRUE if addition is successful, VK_FALSE otherwise
-static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
+bool CoreChecks::SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type) {
bool skip = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
if (binding.mem == VK_NULL_HANDLE) {
@@ -605,8 +580,8 @@
return skip;
}
-bool ValidateDeviceQueueFamily(layer_data *device_data, uint32_t queue_family, const char *cmd_name, const char *parameter_name,
- const char *error_code, bool optional = false) {
+bool CoreChecks::ValidateDeviceQueueFamily(layer_data *device_data, uint32_t queue_family, const char *cmd_name,
+ const char *parameter_name, const char *error_code, bool optional = false) {
bool skip = false;
if (!optional && queue_family == VK_QUEUE_FAMILY_IGNORED) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
@@ -625,9 +600,9 @@
return skip;
}
-bool ValidateQueueFamilies(layer_data *device_data, uint32_t queue_family_count, const uint32_t *queue_families,
- const char *cmd_name, const char *array_parameter_name, const char *unique_error_code,
- const char *valid_error_code, bool optional = false) {
+bool CoreChecks::ValidateQueueFamilies(layer_data *device_data, uint32_t queue_family_count, const uint32_t *queue_families,
+ const char *cmd_name, const char *array_parameter_name, const char *unique_error_code,
+ const char *valid_error_code, bool optional = false) {
bool skip = false;
if (queue_families) {
std::unordered_set<uint32_t> set;
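The uniqueness half of ValidateQueueFamilies boils down to inserting each requested index into an unordered_set and flagging any insert that fails. A small illustrative helper, not the layer's code:

    #include <cstdint>
    #include <unordered_set>
    #include <vector>

    // Return every queue family index that appears more than once in the request.
    std::vector<uint32_t> FindDuplicateQueueFamilies(const uint32_t *families, uint32_t count) {
        std::unordered_set<uint32_t> seen;
        std::vector<uint32_t> duplicates;
        for (uint32_t i = 0; i < count; ++i) {
            if (!seen.insert(families[i]).second) duplicates.push_back(families[i]);
        }
        return duplicates;
    }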
@@ -650,8 +625,8 @@
}
// Check object status for selected flag state
-static bool ValidateStatus(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
- const char *fail_msg, const char *msg_code) {
+bool CoreChecks::ValidateStatus(layer_data *dev_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
+ const char *fail_msg, const char *msg_code) {
if (!(pNode->status & status_mask)) {
return log_msg(dev_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(pNode->commandBuffer), msg_code, "command buffer object %s: %s..",
@@ -661,7 +636,7 @@
}
// Retrieve pipeline node ptr for given pipeline object
-PIPELINE_STATE *GetPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
+PIPELINE_STATE *CoreChecks::GetPipelineState(layer_data const *dev_data, VkPipeline pipeline) {
auto it = dev_data->pipelineMap.find(pipeline);
if (it == dev_data->pipelineMap.end()) {
return nullptr;
@@ -669,7 +644,7 @@
return it->second.get();
}
-RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
+RENDER_PASS_STATE *CoreChecks::GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass) {
auto it = dev_data->renderPassMap.find(renderpass);
if (it == dev_data->renderPassMap.end()) {
return nullptr;
@@ -677,7 +652,7 @@
return it->second.get();
}
-std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
+std::shared_ptr<RENDER_PASS_STATE> CoreChecks::GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass) {
auto it = dev_data->renderPassMap.find(renderpass);
if (it == dev_data->renderPassMap.end()) {
return nullptr;
@@ -685,7 +660,7 @@
return it->second;
}
-FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
+FRAMEBUFFER_STATE *CoreChecks::GetFramebufferState(const layer_data *dev_data, VkFramebuffer framebuffer) {
auto it = dev_data->frameBufferMap.find(framebuffer);
if (it == dev_data->frameBufferMap.end()) {
return nullptr;
@@ -702,7 +677,7 @@
return it->second;
}
-static PIPELINE_LAYOUT_NODE const *GetPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
+PIPELINE_LAYOUT_NODE const *CoreChecks::GetPipelineLayout(layer_data const *dev_data, VkPipelineLayout pipeLayout) {
auto it = dev_data->pipelineLayoutMap.find(pipeLayout);
if (it == dev_data->pipelineLayoutMap.end()) {
return nullptr;
@@ -710,7 +685,7 @@
return &it->second;
}
-shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
+shader_module const *CoreChecks::GetShaderModuleState(layer_data const *dev_data, VkShaderModule module) {
auto it = dev_data->shaderModuleMap.find(module);
if (it == dev_data->shaderModuleMap.end()) {
return nullptr;
@@ -718,8 +693,8 @@
return it->second.get();
}
-const TEMPLATE_STATE *GetDescriptorTemplateState(const layer_data *dev_data,
- VkDescriptorUpdateTemplateKHR descriptor_update_template) {
+const TEMPLATE_STATE *CoreChecks::GetDescriptorTemplateState(const layer_data *dev_data,
+ VkDescriptorUpdateTemplateKHR descriptor_update_template) {
const auto it = dev_data->desc_template_map.find(descriptor_update_template);
if (it == dev_data->desc_template_map.cend()) {
return nullptr;
@@ -738,8 +713,8 @@
}
// Validate state stored as flags at time of draw call
-static bool ValidateDrawStateFlags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
- const char *msg_code) {
+bool CoreChecks::ValidateDrawStateFlags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
+ const char *msg_code) {
bool result = false;
if (pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_LIST ||
pPipe->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP) {
@@ -777,9 +752,10 @@
return result;
}
-static bool LogInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
- const char *type2_string, const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
- uint32_t secondary_attach, const char *msg, const char *caller, const char *error_code) {
+bool CoreChecks::LogInvalidAttachmentMessage(layer_data const *dev_data, const char *type1_string,
+ const RENDER_PASS_STATE *rp1_state, const char *type2_string,
+ const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
+ const char *msg, const char *caller, const char *error_code) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
HandleToUint64(rp1_state->renderPass), error_code,
"%s: RenderPasses incompatible between %s w/ renderPass %s and %s w/ renderPass %s Attachment %u is not "
@@ -788,10 +764,10 @@
dev_data->report_data->FormatHandle(rp2_state->renderPass).c_str(), primary_attach, secondary_attach, msg);
}
-static bool ValidateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
- const RENDER_PASS_STATE *rp1_state, const char *type2_string,
- const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach, uint32_t secondary_attach,
- const char *caller, const char *error_code) {
+bool CoreChecks::ValidateAttachmentCompatibility(layer_data const *dev_data, const char *type1_string,
+ const RENDER_PASS_STATE *rp1_state, const char *type2_string,
+ const RENDER_PASS_STATE *rp2_state, uint32_t primary_attach,
+ uint32_t secondary_attach, const char *caller, const char *error_code) {
bool skip = false;
const auto &primaryPassCI = rp1_state->createInfo;
const auto &secondaryPassCI = rp2_state->createInfo;
@@ -830,9 +806,10 @@
return skip;
}
-static bool ValidateSubpassCompatibility(layer_data const *dev_data, const char *type1_string, const RENDER_PASS_STATE *rp1_state,
- const char *type2_string, const RENDER_PASS_STATE *rp2_state, const int subpass,
- const char *caller, const char *error_code) {
+bool CoreChecks::ValidateSubpassCompatibility(layer_data const *dev_data, const char *type1_string,
+ const RENDER_PASS_STATE *rp1_state, const char *type2_string,
+ const RENDER_PASS_STATE *rp2_state, const int subpass, const char *caller,
+ const char *error_code) {
bool skip = false;
const auto &primary_desc = rp1_state->createInfo.pSubpasses[subpass];
const auto &secondary_desc = rp2_state->createInfo.pSubpasses[subpass];
@@ -886,9 +863,9 @@
// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
// This function deals directly with the CreateInfo, there are overloaded versions below that can take the renderPass handle and
// will then feed into this function
-static bool ValidateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
- const RENDER_PASS_STATE *rp1_state, const char *type2_string,
- const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) {
+bool CoreChecks::ValidateRenderPassCompatibility(layer_data const *dev_data, const char *type1_string,
+ const RENDER_PASS_STATE *rp1_state, const char *type2_string,
+ const RENDER_PASS_STATE *rp2_state, const char *caller, const char *error_code) {
bool skip = false;
if (rp1_state->createInfo.subpassCount != rp2_state->createInfo.subpassCount) {
@@ -908,7 +885,7 @@
}
// Return Set node ptr for specified set or else NULL
-cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
+cvdescriptorset::DescriptorSet *CoreChecks::GetSetNode(const layer_data *dev_data, VkDescriptorSet set) {
auto set_it = dev_data->setMap.find(set);
if (set_it == dev_data->setMap.end()) {
return NULL;
@@ -938,8 +915,8 @@
}
// Validate draw-time state related to the PSO
-static bool ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
- CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
+bool CoreChecks::ValidatePipelineDrawtimeState(layer_data const *dev_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
+ CMD_TYPE cmd_type, PIPELINE_STATE const *pPipeline, const char *caller) {
bool skip = false;
// Verify vertex binding
@@ -1067,7 +1044,8 @@
subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
}
- if (!(dev_data->extensions.vk_amd_mixed_attachment_samples || dev_data->extensions.vk_nv_framebuffer_mixed_samples) &&
+ if (!(dev_data->device_extensions.vk_amd_mixed_attachment_samples ||
+ dev_data->device_extensions.vk_nv_framebuffer_mixed_samples) &&
((subpass_num_samples & static_cast<unsigned>(pso_num_samples)) != subpass_num_samples)) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
@@ -1171,9 +1149,9 @@
}
// Validate overall state at the time of a draw call
-static bool ValidateCmdBufDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
- const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code,
- const char *state_err_code) {
+bool CoreChecks::ValidateCmdBufDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, CMD_TYPE cmd_type, const bool indexed,
+ const VkPipelineBindPoint bind_point, const char *function, const char *pipe_err_code,
+ const char *state_err_code) {
bool result = false;
auto const &state = cb_node->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
@@ -1243,7 +1221,7 @@
return result;
}
-static void UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
+void CoreChecks::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
auto const &state = cb_state->lastBound[bind_point];
PIPELINE_STATE *pPipe = state.pipeline_state;
if (VK_NULL_HANDLE != state.pipeline_layout) {
@@ -1268,8 +1246,8 @@
}
}
-static bool ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
- int pipelineIndex) {
+bool CoreChecks::ValidatePipelineLocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
+ int pipelineIndex) {
bool skip = false;
PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
@@ -1309,8 +1287,8 @@
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the layer_data->* maps in this function.
-static bool ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
- int pipelineIndex) {
+bool CoreChecks::ValidatePipelineUnlocked(layer_data *dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines,
+ int pipelineIndex) {
bool skip = false;
PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
@@ -1438,7 +1416,7 @@
}
}
}
- if (dev_data->extensions.vk_nv_mesh_shader) {
+ if (dev_data->device_extensions.vk_nv_mesh_shader) {
// VS or mesh is required
if (!(pPipeline->active_shaders & (VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_MESH_BIT_NV))) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
@@ -1606,8 +1584,7 @@
VkFormat format = vi->pVertexAttributeDescriptions[j].format;
// Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
VkFormatProperties properties;
- dev_data->instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format,
- &properties);
+ dev_data->instance_dispatch_table.GetPhysicalDeviceFormatProperties(dev_data->physical_device, format, &properties);
if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
skip |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
@@ -1628,7 +1605,8 @@
}
};
- if (!(dev_data->extensions.vk_amd_mixed_attachment_samples || dev_data->extensions.vk_nv_framebuffer_mixed_samples)) {
+ if (!(dev_data->device_extensions.vk_amd_mixed_attachment_samples ||
+ dev_data->device_extensions.vk_nv_framebuffer_mixed_samples)) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_num_samples = 0;
@@ -1650,7 +1628,7 @@
}
}
- if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
+ if (dev_data->device_extensions.vk_amd_mixed_attachment_samples) {
VkSampleCountFlagBits max_sample_count = static_cast<VkSampleCountFlagBits>(0);
for (uint32_t i = 0; i < subpass_desc->colorAttachmentCount; ++i) {
if (subpass_desc->pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) {
@@ -1676,7 +1654,7 @@
}
}
- if (dev_data->extensions.vk_nv_framebuffer_mixed_samples) {
+ if (dev_data->device_extensions.vk_nv_framebuffer_mixed_samples) {
uint32_t raster_samples = static_cast<uint32_t>(GetNumSamples(pPipeline));
uint32_t subpass_color_samples = 0;
@@ -1740,7 +1718,7 @@
}
}
- if (dev_data->extensions.vk_nv_fragment_coverage_to_color) {
+ if (dev_data->device_extensions.vk_nv_fragment_coverage_to_color) {
const auto coverage_to_color_state =
lvl_find_in_chain<VkPipelineCoverageToColorStateCreateInfoNV>(pPipeline->graphicsPipelineCI.pMultisampleState);
@@ -1798,7 +1776,7 @@
// Block of code at start here specifically for managing/tracking DSs
// Return Pool node ptr for specified pool or else NULL
-DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
+DESCRIPTOR_POOL_STATE *CoreChecks::GetDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
auto pool_it = dev_data->descriptorPoolMap.find(pool);
if (pool_it == dev_data->descriptorPoolMap.end()) {
return NULL;
@@ -1810,8 +1788,8 @@
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
-static bool ValidateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const char *func_str) {
- if (dev_data->instance_data->disabled.idle_descriptor_set) return false;
+bool CoreChecks::ValidateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, const char *func_str) {
+ if (dev_data->disabled.idle_descriptor_set) return false;
bool skip = false;
auto set_node = dev_data->setMap.find(set);
if (set_node == dev_data->setMap.end()) {
@@ -1832,13 +1810,13 @@
}
// Remove set from setMap and delete the set
-static void FreeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
+void CoreChecks::FreeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
dev_data->setMap.erase(descriptor_set->GetSet());
delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
-static void DeletePools(layer_data *dev_data) {
+void CoreChecks::DeletePools(layer_data *dev_data) {
for (auto ii = dev_data->descriptorPoolMap.begin(); ii != dev_data->descriptorPoolMap.end();) {
// Remove this pools' sets from setMap and delete them
for (auto ds : ii->second->sets) {
@@ -1851,7 +1829,7 @@
}
// For given CB object, fetch associated CB Node from map
-GLOBAL_CB_NODE *GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
+GLOBAL_CB_NODE *CoreChecks::GetCBNode(layer_data const *dev_data, const VkCommandBuffer cb) {
auto it = dev_data->commandBufferMap.find(cb);
if (it == dev_data->commandBufferMap.end()) {
return NULL;
@@ -1860,7 +1838,7 @@
}
// If a renderpass is active, verify that the given command type is appropriate for current subpass state
-bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
+bool CoreChecks::ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
if (!pCB->activeRenderPass) return false;
bool skip = false;
if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
@@ -1877,11 +1855,11 @@
return skip;
}
-bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
- VkQueueFlags required_flags, const char *error_code) {
+bool CoreChecks::ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name,
+ VkQueueFlags required_flags, const char *error_code) {
auto pool = GetCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
if (pool) {
- VkQueueFlags queue_flags = GetPhysicalDeviceState(dev_data)->queue_family_properties[pool->queueFamilyIndex].queueFlags;
+ VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (!(required_flags & queue_flags)) {
string required_flags_string;
for (auto flag : {VK_QUEUE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT, VK_QUEUE_COMPUTE_BIT}) {
@@ -1907,7 +1885,7 @@
return "destroyed";
}
-static bool ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
+bool CoreChecks::ReportInvalidCommandBuffer(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const char *call_source) {
bool skip = false;
for (auto obj : cb_state->broken_bindings) {
const char *type_str = object_string[obj.type];
@@ -2006,7 +1984,7 @@
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not in the recording state or if
// there's an issue with the Cmd ordering
-bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
+bool CoreChecks::ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name) {
switch (cb_state->state) {
case CB_RECORDING:
return ValidateCmdSubpassState(dev_data, cb_state, cmd);
@@ -2030,7 +2008,7 @@
}
// For given object struct return a ptr of BASE_NODE type for its wrapping struct
-BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
+BASE_NODE *CoreChecks::GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
BASE_NODE *base_ptr = nullptr;
switch (object_struct.type) {
case kVulkanObjectTypeDescriptorSet: {
@@ -2105,13 +2083,13 @@
cb_node->object_bindings.insert(obj);
}
 // For a given object, if cb_node is in that object's cb_bindings, remove cb_node
-static void RemoveCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
+void CoreChecks::RemoveCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
if (base_obj) base_obj->cb_bindings.erase(cb_node);
}
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
-static void ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
+void CoreChecks::ResetCommandBufferState(layer_data *dev_data, const VkCommandBuffer cb) {
GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
if (pCB) {
pCB->in_use.store(0);
@@ -2238,7 +2216,7 @@
// Flags validation error if the associated call is made inside a render pass. The apiName routine should ONLY be called outside a
// render pass.
-bool InsideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
+bool CoreChecks::InsideRenderPass(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
bool inside = false;
if (pCB->activeRenderPass) {
inside = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -2251,7 +2229,7 @@
// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
-bool OutsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
+bool CoreChecks::OutsideRenderPass(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode) {
bool outside = false;
if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
@@ -2263,7 +2241,7 @@
return outside;
}
-static void InitGpuValidation(instance_layer_data *instance_data) {
+void CoreChecks::InitGpuValidation(instance_layer_data *instance_data) {
// Process the layer settings file.
enum CoreValidationGpuFlagBits {
CORE_VALIDATION_GPU_VALIDATION_ALL_BIT = 0x00000001,
@@ -2277,71 +2255,17 @@
std::string gpu_flags_key = "lunarg_core_validation.gpu_validation";
CoreGPUFlags gpu_flags = GetLayerOptionFlags(gpu_flags_key, gpu_flags_option_definitions, 0);
if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_ALL_BIT) {
- instance_data->enabled.gpu_validation = true;
+ instance_data->instance_state->enabled.gpu_validation = true;
}
if (gpu_flags & CORE_VALIDATION_GPU_VALIDATION_RESERVE_BINDING_SLOT_BIT) {
- instance_data->enabled.gpu_validation_reserve_binding_slot = true;
+ instance_data->instance_state->enabled.gpu_validation_reserve_binding_slot = true;
}
}
-// For the given ValidationCheck enum, set all relevant instance disabled flags to true
-void SetDisabledFlags(instance_layer_data *instance_data, const VkValidationFlagsEXT *val_flags_struct) {
- for (uint32_t i = 0; i < val_flags_struct->disabledValidationCheckCount; ++i) {
- switch (val_flags_struct->pDisabledValidationChecks[i]) {
- case VK_VALIDATION_CHECK_SHADERS_EXT:
- instance_data->disabled.shader_validation = true;
- break;
- case VK_VALIDATION_CHECK_ALL_EXT:
- // Set all disabled flags to true
- instance_data->disabled.SetAll(true);
- break;
- default:
- break;
- }
- }
-}
-
-void SetValidationFeatures(instance_layer_data *instance_data, const VkValidationFeaturesEXT *val_features_struct) {
- for (uint32_t i = 0; i < val_features_struct->disabledValidationFeatureCount; ++i) {
- switch (val_features_struct->pDisabledValidationFeatures[i]) {
- case VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT:
- instance_data->disabled.shader_validation = true;
- break;
- case VK_VALIDATION_FEATURE_DISABLE_ALL_EXT:
- // Set all disabled flags to true
- instance_data->disabled.SetAll(true);
- break;
- default:
- break;
- }
- }
- for (uint32_t i = 0; i < val_features_struct->enabledValidationFeatureCount; ++i) {
- switch (val_features_struct->pEnabledValidationFeatures[i]) {
- case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT:
- instance_data->enabled.gpu_validation = true;
- break;
- case VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT:
- instance_data->enabled.gpu_validation_reserve_binding_slot = true;
- break;
- default:
- break;
- }
- }
-}
-
-void PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkInstance *pInstance, VkResult result) {
+void CoreChecks::PostCallRecordCreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
+ VkInstance *pInstance, VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
- // Parse any pNext chains
- const auto *validation_flags_ext = lvl_find_in_chain<VkValidationFlagsEXT>(pCreateInfo->pNext);
- if (validation_flags_ext) {
- SetDisabledFlags(instance_data, validation_flags_ext);
- }
- const auto *validation_features_ext = lvl_find_in_chain<VkValidationFeaturesEXT>(pCreateInfo->pNext);
- if (validation_features_ext) {
- SetValidationFeatures(instance_data, validation_features_ext);
- }
InitGpuValidation(instance_data);
}
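The deleted SetDisabledFlags/SetValidationFeatures bodies walked the instance create-info pNext chain for VkValidationFlagsEXT and VkValidationFeaturesEXT; under the chassis that parsing happens once in shared code. For reference, a standalone sketch of the chain walk and one of the feature checks (FindInPNextChain and GpuAssistedValidationRequested are illustrative names, not the layer's helpers):

    #include <vulkan/vulkan.h>

    // Walk a pNext chain looking for a structure with the requested sType; this is
    // essentially what a find-in-chain helper does.
    const void *FindInPNextChain(const void *chain, VkStructureType wanted) {
        for (auto node = static_cast<const VkBaseInStructure *>(chain); node; node = node->pNext) {
            if (node->sType == wanted) return node;
        }
        return nullptr;
    }

    bool GpuAssistedValidationRequested(const VkInstanceCreateInfo *create_info) {
        auto features = static_cast<const VkValidationFeaturesEXT *>(
            FindInPNextChain(create_info->pNext, VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT));
        if (!features) return false;
        for (uint32_t i = 0; i < features->enabledValidationFeatureCount; ++i) {
            if (features->pEnabledValidationFeatures[i] == VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT) return true;
        }
        return false;
    }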
@@ -2350,7 +2274,7 @@
const char *queue_family_var_name) {
bool skip = false;
- const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
+ const char *conditional_ext_cmd = instance_data->instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
@@ -2398,7 +2322,7 @@
const auto requested_queue_count = infos[i].queueCount;
const auto queue_family_props_count = pd_state->queue_family_properties.size();
const bool queue_family_has_props = requested_queue_family < queue_family_props_count;
- const char *conditional_ext_cmd = instance_data->extensions.vk_khr_get_physical_device_properties_2
+ const char *conditional_ext_cmd = instance_data->instance_extensions.vk_khr_get_physical_device_properties_2
? " or vkGetPhysicalDeviceQueueFamilyProperties2[KHR]"
: "";
std::string count_note =
@@ -2423,15 +2347,15 @@
return skip;
}
-bool PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
+bool CoreChecks::PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
bool skip = false;
- auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
+ auto pd_state = GetPhysicalDeviceState(gpu);
// TODO: object_tracker should perhaps do this instead
// and it does not seem to currently work anyway -- the loader just crashes before this point
- if (!GetPhysicalDeviceState(instance_data, gpu)) {
+ if (!pd_state) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
0, kVUID_Core_DevLimit_MustQueryCount,
"Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
@@ -2441,21 +2365,20 @@
return skip;
}
-void PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkDevice *pDevice, std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
+void CoreChecks::PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice,
+ std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
// GPU Validation can possibly turn on device features, so give it a chance to change the create info.
- if (instance_data->enabled.gpu_validation) {
+ if (GetEnables(instance_data)->gpu_validation) {
VkPhysicalDeviceFeatures supported_features;
- instance_data->dispatch_table.GetPhysicalDeviceFeatures(gpu, &supported_features);
+ instance_dispatch_table.GetPhysicalDeviceFeatures(gpu, &supported_features);
GpuPreCallRecordCreateDevice(gpu, modified_create_info, &supported_features);
}
}
-void PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
-
+void CoreChecks::PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, VkResult result) {
if (VK_SUCCESS != result) return;
const VkPhysicalDeviceFeatures *enabled_features_found = pCreateInfo->pEnabledFeatures;
@@ -2466,152 +2389,156 @@
}
}
- layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
+ ValidationObject *device_object = ::GetLayerDataPtr(::get_dispatch_key(*pDevice), ::layer_data_map);
+ ValidationObject *validation_data = GetValidationObject(device_object->object_dispatch, LayerObjectTypeCoreValidation);
+ CoreChecks *core_checks = static_cast<CoreChecks *>(validation_data);
+
if (nullptr == enabled_features_found) {
- device_data->enabled_features.core = {};
+ core_checks->enabled_features.core = {};
} else {
- device_data->enabled_features.core = *enabled_features_found;
+ core_checks->enabled_features.core = *enabled_features_found;
}
// Make sure that queue_family_properties are obtained for this device's physical_device, even if the app has not
// previously set them through an explicit API call.
uint32_t count;
- auto pd_state = GetPhysicalDeviceState(instance_data, gpu);
- instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
+ auto pd_state = GetPhysicalDeviceState(gpu);
+ instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
pd_state->queue_family_count = std::max(pd_state->queue_family_count, count);
pd_state->queue_family_properties.resize(std::max(static_cast<uint32_t>(pd_state->queue_family_properties.size()), count));
- instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]);
+ instance_dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, &pd_state->queue_family_properties[0]);
+ // Save local link to this device's physical device state
+ core_checks->physical_device_state = pd_state;
const auto *device_group_ci = lvl_find_in_chain<VkDeviceGroupDeviceCreateInfo>(pCreateInfo->pNext);
- device_data->physical_device_count =
+ core_checks->physical_device_count =
device_group_ci && device_group_ci->physicalDeviceCount > 0 ? device_group_ci->physicalDeviceCount : 1;
const auto *descriptor_indexing_features = lvl_find_in_chain<VkPhysicalDeviceDescriptorIndexingFeaturesEXT>(pCreateInfo->pNext);
if (descriptor_indexing_features) {
- device_data->enabled_features.descriptor_indexing = *descriptor_indexing_features;
+ core_checks->enabled_features.descriptor_indexing = *descriptor_indexing_features;
}
const auto *eight_bit_storage_features = lvl_find_in_chain<VkPhysicalDevice8BitStorageFeaturesKHR>(pCreateInfo->pNext);
if (eight_bit_storage_features) {
- device_data->enabled_features.eight_bit_storage = *eight_bit_storage_features;
+ core_checks->enabled_features.eight_bit_storage = *eight_bit_storage_features;
}
const auto *exclusive_scissor_features = lvl_find_in_chain<VkPhysicalDeviceExclusiveScissorFeaturesNV>(pCreateInfo->pNext);
if (exclusive_scissor_features) {
- device_data->enabled_features.exclusive_scissor = *exclusive_scissor_features;
+ core_checks->enabled_features.exclusive_scissor = *exclusive_scissor_features;
}
const auto *shading_rate_image_features = lvl_find_in_chain<VkPhysicalDeviceShadingRateImageFeaturesNV>(pCreateInfo->pNext);
if (shading_rate_image_features) {
- device_data->enabled_features.shading_rate_image = *shading_rate_image_features;
+ core_checks->enabled_features.shading_rate_image = *shading_rate_image_features;
}
const auto *mesh_shader_features = lvl_find_in_chain<VkPhysicalDeviceMeshShaderFeaturesNV>(pCreateInfo->pNext);
if (mesh_shader_features) {
- device_data->enabled_features.mesh_shader = *mesh_shader_features;
+ core_checks->enabled_features.mesh_shader = *mesh_shader_features;
}
const auto *inline_uniform_block_features =
lvl_find_in_chain<VkPhysicalDeviceInlineUniformBlockFeaturesEXT>(pCreateInfo->pNext);
if (inline_uniform_block_features) {
- device_data->enabled_features.inline_uniform_block = *inline_uniform_block_features;
+ core_checks->enabled_features.inline_uniform_block = *inline_uniform_block_features;
}
const auto *transform_feedback_features = lvl_find_in_chain<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(pCreateInfo->pNext);
if (transform_feedback_features) {
- device_data->enabled_features.transform_feedback_features = *transform_feedback_features;
+ core_checks->enabled_features.transform_feedback_features = *transform_feedback_features;
}
const auto *float16_int8_features = lvl_find_in_chain<VkPhysicalDeviceFloat16Int8FeaturesKHR>(pCreateInfo->pNext);
if (float16_int8_features) {
- device_data->enabled_features.float16_int8 = *float16_int8_features;
+ core_checks->enabled_features.float16_int8 = *float16_int8_features;
}
const auto *vtx_attrib_div_features = lvl_find_in_chain<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(pCreateInfo->pNext);
if (vtx_attrib_div_features) {
- device_data->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features;
+ core_checks->enabled_features.vtx_attrib_divisor_features = *vtx_attrib_div_features;
}
const auto *scalar_block_layout_features = lvl_find_in_chain<VkPhysicalDeviceScalarBlockLayoutFeaturesEXT>(pCreateInfo->pNext);
if (scalar_block_layout_features) {
- device_data->enabled_features.scalar_block_layout_features = *scalar_block_layout_features;
+ core_checks->enabled_features.scalar_block_layout_features = *scalar_block_layout_features;
}
const auto *buffer_address = lvl_find_in_chain<VkPhysicalDeviceBufferAddressFeaturesEXT>(pCreateInfo->pNext);
if (buffer_address) {
- device_data->enabled_features.buffer_address = *buffer_address;
+ core_checks->enabled_features.buffer_address = *buffer_address;
}
// Store physical device properties and physical device mem limits into device layer_data structs
- instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &device_data->phys_dev_mem_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &device_data->phys_dev_props);
+ instance_dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &core_checks->phys_dev_mem_props);
+ instance_dispatch_table.GetPhysicalDeviceProperties(gpu, &core_checks->phys_dev_props);
- if (device_data->extensions.vk_khr_push_descriptor) {
+ if (core_checks->device_extensions.vk_khr_push_descriptor) {
// Get the needed push_descriptor limits
auto push_descriptor_prop = lvl_init_struct<VkPhysicalDevicePushDescriptorPropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&push_descriptor_prop);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.max_push_descriptors = push_descriptor_prop.maxPushDescriptors;
}
- if (device_data->extensions.vk_ext_descriptor_indexing) {
+ if (core_checks->device_extensions.vk_ext_descriptor_indexing) {
// Get the needed descriptor_indexing limits
auto descriptor_indexing_props = lvl_init_struct<VkPhysicalDeviceDescriptorIndexingPropertiesEXT>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&descriptor_indexing_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.descriptor_indexing_props = descriptor_indexing_props;
}
- if (device_data->extensions.vk_nv_shading_rate_image) {
+ if (core_checks->device_extensions.vk_nv_shading_rate_image) {
// Get the needed shading rate image limits
auto shading_rate_image_props = lvl_init_struct<VkPhysicalDeviceShadingRateImagePropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&shading_rate_image_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.shading_rate_image_props = shading_rate_image_props;
}
- if (device_data->extensions.vk_nv_mesh_shader) {
+ if (core_checks->device_extensions.vk_nv_mesh_shader) {
// Get the needed mesh shader limits
auto mesh_shader_props = lvl_init_struct<VkPhysicalDeviceMeshShaderPropertiesNV>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&mesh_shader_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.mesh_shader_props = mesh_shader_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.mesh_shader_props = mesh_shader_props;
}
- if (device_data->extensions.vk_ext_inline_uniform_block) {
+ if (core_checks->device_extensions.vk_ext_inline_uniform_block) {
// Get the needed inline uniform block limits
auto inline_uniform_block_props = lvl_init_struct<VkPhysicalDeviceInlineUniformBlockPropertiesEXT>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&inline_uniform_block_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.inline_uniform_block_props = inline_uniform_block_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.inline_uniform_block_props = inline_uniform_block_props;
}
- if (device_data->extensions.vk_ext_vertex_attribute_divisor) {
+ if (core_checks->device_extensions.vk_ext_vertex_attribute_divisor) {
// Get the needed vertex attribute divisor limits
auto vtx_attrib_divisor_props = lvl_init_struct<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&vtx_attrib_divisor_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.vtx_attrib_divisor_props = vtx_attrib_divisor_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.vtx_attrib_divisor_props = vtx_attrib_divisor_props;
}
- if (device_data->extensions.vk_khr_depth_stencil_resolve) {
+ if (core_checks->device_extensions.vk_khr_depth_stencil_resolve) {
// Get the needed depth and stencil resolve modes
auto depth_stencil_resolve_props = lvl_init_struct<VkPhysicalDeviceDepthStencilResolvePropertiesKHR>();
auto prop2 = lvl_init_struct<VkPhysicalDeviceProperties2KHR>(&depth_stencil_resolve_props);
- instance_data->dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
- device_data->phys_dev_ext_props.depth_stencil_resolve_props = depth_stencil_resolve_props;
+ instance_dispatch_table.GetPhysicalDeviceProperties2KHR(gpu, &prop2);
+ core_checks->phys_dev_ext_props.depth_stencil_resolve_props = depth_stencil_resolve_props;
}
- if (GetEnables(device_data)->gpu_validation) {
+ if (GetEnables(core_checks)->gpu_validation) {
// Copy any needed instance data into the gpu validation state
- device_data->gpu_validation_state.reserve_binding_slot =
- device_data->instance_data->enabled.gpu_validation_reserve_binding_slot;
- GpuPostCallRecordCreateDevice(device_data);
+ core_checks->gpu_validation_state.reserve_binding_slot = GetEnables(core_checks)->gpu_validation_reserve_binding_slot;
+ core_checks->GpuPostCallRecordCreateDevice(core_checks);
}
// Store queue family data
if ((pCreateInfo != nullptr) && (pCreateInfo->pQueueCreateInfos != nullptr)) {
for (uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; ++i) {
- device_data->queue_family_index_map.insert(
+ core_checks->queue_family_index_map.insert(
std::make_pair(pCreateInfo->pQueueCreateInfos[i].queueFamilyIndex, pCreateInfo->pQueueCreateInfos[i].queueCount));
}
}
}
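For reviewers skimming the hunks that follow, the pattern is the same throughout this file: validation entry points that used to be free functions taking an explicit layer_data pointer become CoreChecks member functions that the chassis invokes around each API call, with per-device state held on the validation object. A minimal, self-contained sketch of that shape (the names below are illustrative stand-ins, not the real chassis.h interface):

    #include <cstdio>

    // Illustrative stand-ins only -- the real types live in chassis.h / core_validation.h.
    struct ValidationObject {
        virtual ~ValidationObject() = default;
        // The chassis calls these hooks around the down-chain call; defaults do nothing.
        virtual bool PreCallValidateQueueWaitIdle(int queue) { return false; }
        virtual void PostCallRecordQueueWaitIdle(int queue, int result) {}
    };

    struct CoreChecksSketch : ValidationObject {
        int submissions_in_flight = 2;  // state that used to hang off layer_data

        bool PreCallValidateQueueWaitIdle(int queue) override {
            return false;  // returning true would tell the chassis to skip the driver call
        }
        void PostCallRecordQueueWaitIdle(int queue, int result) override {
            if (result == 0 /* VK_SUCCESS */) submissions_in_flight = 0;
        }
    };

    int main() {
        CoreChecksSketch checks;
        if (!checks.PreCallValidateQueueWaitIdle(0)) {
            // ... the real chassis would call down to vkQueueWaitIdle here ...
            checks.PostCallRecordQueueWaitIdle(0, 0);
        }
        std::printf("in flight after wait: %d\n", checks.submissions_in_flight);
        return 0;
    }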
-void PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
if (!device) return;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (GetEnables(device_data)->gpu_validation) {
@@ -2680,7 +2607,7 @@
}
// Loop through bound objects and increment their in_use counts.
-static void IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
+void CoreChecks::IncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
for (auto obj : cb_node->object_bindings) {
auto base_obj = GetStateStructPtrFromObject(dev_data, obj);
if (base_obj) {
@@ -2689,7 +2616,7 @@
}
}
// Track which resources are in-flight by atomically incrementing their "in_use" count
-static void IncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
+void CoreChecks::IncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
cb_node->submitCount++;
cb_node->in_use.fetch_add(1);
@@ -2716,7 +2643,7 @@
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that, if there are events to be waited on prior to a QueryReset,
// all such events have been signalled.
-static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
+bool CoreChecks::VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *initial_queue, uint64_t initial_seq) {
bool skip = false;
// sequence number we want to validate up to, per queue
@@ -2760,7 +2687,7 @@
}
// When the given fence is retired, verify outstanding queue operations through the point of the fence
-static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
+bool CoreChecks::VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
auto fence_state = GetFenceNode(dev_data, fence);
if (fence_state && fence_state->scope == kSyncScopeInternal && VK_NULL_HANDLE != fence_state->signaler.first) {
return VerifyQueueStateToSeq(dev_data, GetQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
@@ -2769,7 +2696,7 @@
}
// Decrement in-use count for objects bound to command buffer
-static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
+void CoreChecks::DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
BASE_NODE *base_obj = nullptr;
for (auto obj : cb_node->object_bindings) {
base_obj = GetStateStructPtrFromObject(dev_data, obj);
@@ -2779,7 +2706,7 @@
}
}
-static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
+void CoreChecks::RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;
// Roll this queue forward, one submission at a time.
@@ -2863,7 +2790,7 @@
pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}
-static bool ValidateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
+bool CoreChecks::ValidateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count) {
bool skip = false;
if ((pCB->in_use.load() || current_submit_count > 1) &&
!(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
@@ -2875,8 +2802,8 @@
return skip;
}
-static bool ValidateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
- int current_submit_count, const char *vu_id) {
+bool CoreChecks::ValidateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, const char *call_source,
+ int current_submit_count, const char *vu_id) {
bool skip = false;
if (dev_data->instance_data->disabled.command_buffer_state) return skip;
// Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
@@ -2917,7 +2844,7 @@
return skip;
}
-static bool ValidateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
+bool CoreChecks::ValidateResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
bool skip = false;
// TODO : We should be able to remove the NULL look-up checks from the code below as long as
@@ -2938,8 +2865,8 @@
}
// Check that the queue family index of 'queue' matches one of the entries in pQueueFamilyIndices
-bool ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue, uint32_t count,
- const uint32_t *indices) {
+bool CoreChecks::ValidImageBufferQueue(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, const VK_OBJECT *object, VkQueue queue,
+ uint32_t count, const uint32_t *indices) {
bool found = false;
bool skip = false;
auto queue_state = GetQueueState(dev_data, queue);
@@ -2965,7 +2892,7 @@
// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
-static bool ValidateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
+bool CoreChecks::ValidateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
bool skip = false;
auto pPool = GetCommandPoolNode(dev_data, pCB->createInfo.commandPool);
auto queue_state = GetQueueState(dev_data, queue);
@@ -3001,9 +2928,9 @@
return skip;
}
-static bool ValidatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count,
- QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
- QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
+bool CoreChecks::ValidatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, int current_submit_count,
+ QFOTransferCBScoreboards<VkImageMemoryBarrier> *qfo_image_scoreboards,
+ QFOTransferCBScoreboards<VkBufferMemoryBarrier> *qfo_buffer_scoreboards) {
// Track in-use for resources off of primary and any secondary CBs
bool skip = false;
@@ -3036,7 +2963,7 @@
return skip;
}
-static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
+bool CoreChecks::ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
bool skip = false;
if (pFence && pFence->scope == kSyncScopeInternal) {
@@ -3062,7 +2989,8 @@
return skip;
}
-void PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence, VkResult result) {
+void CoreChecks::PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
uint64_t early_retire_seq = 0;
auto pQueue = GetQueueState(device_data, queue);
@@ -3175,7 +3103,7 @@
}
}
-bool PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
+bool CoreChecks::PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
auto pFence = GetFenceNode(device_data, fence);
bool skip = ValidateFenceForSubmit(device_data, pFence);
@@ -3353,8 +3281,8 @@
//
// AHB-extension new APIs
//
-bool PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
- VkAndroidHardwareBufferPropertiesANDROID *pProperties) {
+bool CoreChecks::PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
+ VkAndroidHardwareBufferPropertiesANDROID *pProperties) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
// buffer must be a valid Android hardware buffer object with at least one of the AHARDWAREBUFFER_USAGE_GPU_* usage flags.
@@ -3373,8 +3301,9 @@
return skip;
}
-void PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
- VkAndroidHardwareBufferPropertiesANDROID *pProperties, VkResult result) {
+void CoreChecks::PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer *buffer,
+ VkAndroidHardwareBufferPropertiesANDROID *pProperties,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
auto ahb_format_props = lvl_find_in_chain<VkAndroidHardwareBufferFormatPropertiesANDROID>(pProperties->pNext);
@@ -3384,8 +3313,9 @@
}
}
-bool PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
- struct AHardwareBuffer **pBuffer) {
+bool CoreChecks::PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device,
+ const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
+ struct AHardwareBuffer **pBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(device_data, pInfo->memory);
@@ -3422,7 +3352,7 @@
//
// AHB-specific validation within non-AHB APIs
//
-static bool ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) {
+bool CoreChecks::ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) {
bool skip = false;
auto import_ahb_info = lvl_find_in_chain<VkImportAndroidHardwareBufferInfoANDROID>(alloc_info->pNext);
auto exp_mem_alloc_info = lvl_find_in_chain<VkExportMemoryAllocateInfo>(alloc_info->pNext);
@@ -3465,9 +3395,9 @@
VkExternalBufferProperties ext_buf_props = {};
ext_buf_props.sType = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES;
- instance_layer_data *instance_data =
- GetLayerDataPtr(get_dispatch_key(dev_data->instance_data->instance), instance_layer_data_map);
- instance_data->dispatch_table.GetPhysicalDeviceExternalBufferProperties(dev_data->physical_device, &pdebi, &ext_buf_props);
+ instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->instance_data->instance), layer_data_map);
+ instance_data->instance_dispatch_table.GetPhysicalDeviceExternalBufferProperties(dev_data->physical_device, &pdebi,
+ &ext_buf_props);
// Collect external format info
VkPhysicalDeviceExternalImageFormatInfo pdeifi = {};
@@ -3518,7 +3448,8 @@
VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
ahb_props.sType = VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
ahb_props.pNext = &ahb_format_props;
- dev_data->dispatch_table.GetAndroidHardwareBufferPropertiesANDROID(dev_data->device, import_ahb_info->buffer, &ahb_props);
+ dev_data->device_dispatch_table.GetAndroidHardwareBufferPropertiesANDROID(dev_data->device, import_ahb_info->buffer,
+ &ahb_props);
// allocationSize must be the size returned by vkGetAndroidHardwareBufferPropertiesANDROID for the Android hardware buffer
if (alloc_info->allocationSize != ahb_props.allocationSize) {
@@ -3672,9 +3603,9 @@
return skip;
}
-bool ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) {
+bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) {
bool skip = false;
- const debug_report_data *report_data = core_validation::GetReportData(dev_data);
+ const debug_report_data *report_data = GetReportData(dev_data);
IMAGE_STATE *image_state = GetImageState(dev_data, image);
if (image_state->imported_ahb && (0 == image_state->GetBoundMemory().size())) {
@@ -3708,8 +3639,8 @@
return skip;
}
-static bool ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
- const VkSamplerYcbcrConversionCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
+ const VkSamplerYcbcrConversionCreateInfo *create_info) {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if ((nullptr != ext_format_android) && (0 != ext_format_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
@@ -3729,21 +3660,22 @@
return false;
}
-static void RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data, const VkSamplerYcbcrConversionCreateInfo *create_info,
- VkSamplerYcbcrConversion ycbcr_conversion) {
+void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data,
+ const VkSamplerYcbcrConversionCreateInfo *create_info,
+ VkSamplerYcbcrConversion ycbcr_conversion) {
const VkExternalFormatANDROID *ext_format_android = lvl_find_in_chain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_format_android && (0 != ext_format_android->externalFormat)) {
dev_data->ycbcr_conversion_ahb_fmt_map.emplace(ycbcr_conversion, ext_format_android->externalFormat);
}
};
-static void RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion) {
+void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion) {
dev_data->ycbcr_conversion_ahb_fmt_map.erase(ycbcr_conversion);
};
#else // !VK_USE_PLATFORM_ANDROID_KHR
-static bool ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) { return false; }
+bool CoreChecks::ValidateAllocateMemoryANDROID(layer_data *dev_data, const VkMemoryAllocateInfo *alloc_info) { return false; }
static bool ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(const debug_report_data *report_data,
const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
@@ -3751,22 +3683,23 @@
return false;
}
-static bool ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
- const VkSamplerYcbcrConversionCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateSamplerYcbcrConversionANDROID(const layer_data *dev_data,
+ const VkSamplerYcbcrConversionCreateInfo *create_info) {
return false;
}
-bool ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) { return false; }
+bool CoreChecks::ValidateGetImageMemoryRequirements2ANDROID(layer_data *dev_data, const VkImage image) { return false; }
-static void RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data, const VkSamplerYcbcrConversionCreateInfo *create_info,
- VkSamplerYcbcrConversion ycbcr_conversion){};
+void CoreChecks::RecordCreateSamplerYcbcrConversionANDROID(layer_data *dev_data,
+ const VkSamplerYcbcrConversionCreateInfo *create_info,
+ VkSamplerYcbcrConversion ycbcr_conversion){};
-static void RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion){};
+void CoreChecks::RecordDestroySamplerYcbcrConversionANDROID(layer_data *dev_data, VkSamplerYcbcrConversion ycbcr_conversion){};
#endif // VK_USE_PLATFORM_ANDROID_KHR
-bool PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
- const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
+bool CoreChecks::PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
if (device_data->memObjMap.size() >= device_data->phys_dev_props.limits.maxMemoryAllocationCount) {
@@ -3789,8 +3722,8 @@
return skip;
}
-void PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
- const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory, VkResult result) {
+void CoreChecks::PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory, VkResult result) {
if (VK_SUCCESS == result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
AddMemObjInfo(device_data, device, *pMemory, pAllocateInfo);
@@ -3799,8 +3732,8 @@
}
// For given obj node, if it is in use, flag a validation error and return callback result, else return false
-bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
- const char *error_code) {
+bool CoreChecks::ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
+ const char *caller_name, const char *error_code) {
if (dev_data->instance_data->disabled.object_in_use) return false;
bool skip = false;
if (obj_node->in_use.load()) {
@@ -3812,7 +3745,7 @@
return skip;
}
-bool PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(device_data, mem);
VK_OBJECT obj_struct = {HandleToUint64(mem), kVulkanObjectTypeDeviceMemory};
@@ -3823,7 +3756,7 @@
return skip;
}
-void PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!mem) return;
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(device_data, mem);
@@ -3902,7 +3835,7 @@
return skip;
}
-static void StoreMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
+void CoreChecks::StoreMemRanges(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->mem_range.offset = offset;
@@ -3913,8 +3846,8 @@
// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;
-static void InitializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
- void **ppData) {
+void CoreChecks::InitializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
+ void **ppData) {
auto mem_info = GetMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->p_driver_data = *ppData;
@@ -3952,7 +3885,7 @@
// Verify that state for fence being waited on is appropriate. That is,
// a fence being waited on should not already be signaled and
// it should have been submitted on a queue or during acquire next image
-static inline bool VerifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
+bool CoreChecks::VerifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
bool skip = false;
auto pFence = GetFenceNode(dev_data, fence);
@@ -3967,7 +3900,7 @@
return skip;
}
-static void RetireFence(layer_data *dev_data, VkFence fence) {
+void CoreChecks::RetireFence(layer_data *dev_data, VkFence fence) {
auto pFence = GetFenceNode(dev_data, fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
if (pFence->signaler.first != VK_NULL_HANDLE) {
@@ -3981,8 +3914,8 @@
}
}
-bool PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
- uint64_t timeout) {
+bool CoreChecks::PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
+ uint64_t timeout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Verify fence status of submitted fences
if (device_data->instance_data->disabled.wait_for_fences) return false;
@@ -3994,8 +3927,8 @@
return skip;
}
-void PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll, uint64_t timeout,
- VkResult result) {
+void CoreChecks::PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
+ uint64_t timeout, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
@@ -4010,18 +3943,18 @@
// vkGetFenceStatus() at which point we'll clean/remove their CBs if complete.
}
-bool PreCallValidateGetFenceStatus(VkDevice device, VkFence fence) {
+bool CoreChecks::PreCallValidateGetFenceStatus(VkDevice device, VkFence fence) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return VerifyWaitFenceState(device_data, fence, "vkGetFenceStatus()");
}
-void PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
+void CoreChecks::PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RetireFence(device_data, fence);
}
-static void RecordGetDeviceQueueState(layer_data *device_data, uint32_t queue_family_index, VkQueue queue) {
+void CoreChecks::RecordGetDeviceQueueState(layer_data *device_data, uint32_t queue_family_index, VkQueue queue) {
// Add queue to tracking set only if it is new
auto queue_is_new = device_data->queues.emplace(queue);
if (queue_is_new.second == true) {
@@ -4032,8 +3965,8 @@
}
}
-static bool ValidateGetDeviceQueue(layer_data *device_data, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue,
- const char *valid_qfi_vuid, const char *qfi_in_range_vuid) {
+bool CoreChecks::ValidateGetDeviceQueue(layer_data *device_data, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue,
+ const char *valid_qfi_vuid, const char *qfi_in_range_vuid) {
bool skip = false;
skip |= ValidateDeviceQueueFamily(device_data, queueFamilyIndex, "vkGetDeviceQueue", "queueFamilyIndex", valid_qfi_vuid);
@@ -4049,37 +3982,37 @@
return skip;
}
-bool PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
+bool CoreChecks::PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateGetDeviceQueue(device_data, queueFamilyIndex, queueIndex, pQueue, "VUID-vkGetDeviceQueue-queueFamilyIndex-00384",
"VUID-vkGetDeviceQueue-queueIndex-00385");
}
-void PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
+void CoreChecks::PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetDeviceQueueState(device_data, queueFamilyIndex, *pQueue);
}
-void PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
+void CoreChecks::PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetDeviceQueueState(device_data, pQueueInfo->queueFamilyIndex, *pQueue);
}
-bool PreCallValidateQueueWaitIdle(VkQueue queue) {
+bool CoreChecks::PreCallValidateQueueWaitIdle(VkQueue queue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
QUEUE_STATE *queue_state = GetQueueState(device_data, queue);
if (device_data->instance_data->disabled.queue_wait_idle) return false;
return VerifyQueueStateToSeq(device_data, queue_state, queue_state->seq + queue_state->submissions.size());
}
-void PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
+void CoreChecks::PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (VK_SUCCESS != result) return;
QUEUE_STATE *queue_state = GetQueueState(device_data, queue);
RetireWorkOnQueue(device_data, queue_state, queue_state->seq + queue_state->submissions.size());
}
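The queue-wait-idle record hook above retires work with the same sequence-number bookkeeping used throughout this file: each queue keeps a base seq plus a queue of pending submissions, and retiring up to a target seq pops completed entries from the front. A toy sketch under those assumptions (the field and type names here are invented; the real RetireWorkOnQueue also rolls forward fences, semaphores, and work on other queues):

    #include <cstdint>
    #include <cstdio>
    #include <deque>

    // Illustrative only: the real QUEUE_STATE also tracks fences, semaphores and CBs per submission.
    struct ToySubmission { int id; };

    struct ToyQueueState {
        uint64_t seq = 0;                       // seq number of the oldest unretired submission
        std::deque<ToySubmission> submissions;  // pending work, oldest first
    };

    // Retire every submission whose sequence number is below 'target_seq'.
    static void RetireToSeq(ToyQueueState &q, uint64_t target_seq) {
        while (q.seq < target_seq && !q.submissions.empty()) {
            std::printf("retiring submission %d at seq %llu\n", q.submissions.front().id,
                        (unsigned long long)q.seq);
            q.submissions.pop_front();
            ++q.seq;
        }
    }

    int main() {
        ToyQueueState q;
        q.submissions = {{1}, {2}, {3}};
        // vkQueueWaitIdle-style retirement: everything currently queued is considered complete.
        RetireToSeq(q, q.seq + q.submissions.size());
        return 0;
    }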
-bool PreCallValidateDeviceWaitIdle(VkDevice device) {
+bool CoreChecks::PreCallValidateDeviceWaitIdle(VkDevice device) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (device_data->instance_data->disabled.device_wait_idle) return false;
bool skip = false;
@@ -4089,7 +4022,7 @@
return skip;
}
-void PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
+void CoreChecks::PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
for (auto &queue : device_data->queueMap) {
@@ -4097,7 +4030,7 @@
}
}
-bool PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
FENCE_NODE *fence_node = GetFenceNode(device_data, fence);
bool skip = false;
@@ -4111,13 +4044,13 @@
return skip;
}
-void PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!fence) return;
device_data->fenceMap.erase(fence);
}
-bool PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(device_data, semaphore);
VK_OBJECT obj_struct = {HandleToUint64(semaphore), kVulkanObjectTypeSemaphore};
@@ -4130,13 +4063,13 @@
return skip;
}
-void PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!semaphore) return;
device_data->semaphoreMap.erase(semaphore);
}
-bool PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
EVENT_STATE *event_state = GetEventNode(device_data, event);
VK_OBJECT obj_struct = {HandleToUint64(event), kVulkanObjectTypeEvent};
@@ -4147,7 +4080,7 @@
return skip;
}
-void PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!event) return;
EVENT_STATE *event_state = GetEventNode(device_data, event);
@@ -4156,7 +4089,7 @@
device_data->eventMap.erase(event);
}
-bool PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
QUERY_POOL_NODE *qp_state = GetQueryPoolNode(device_data, queryPool);
VK_OBJECT obj_struct = {HandleToUint64(queryPool), kVulkanObjectTypeQueryPool};
@@ -4168,7 +4101,7 @@
return skip;
}
-void PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!queryPool) return;
QUERY_POOL_NODE *qp_state = GetQueryPoolNode(device_data, queryPool);
@@ -4177,8 +4110,9 @@
device_data->queryPoolMap.erase(queryPool);
}
-bool PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
+bool CoreChecks::PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, size_t dataSize, void *pData, VkDeviceSize stride,
+ VkQueryResultFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto query_pool_state = device_data->queryPoolMap.find(queryPool);
@@ -4194,9 +4128,9 @@
return skip;
}
-void PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
- VkResult result) {
+void CoreChecks::PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if ((VK_SUCCESS != result) && (VK_NOT_READY != result)) return;
@@ -4238,8 +4172,8 @@
// In the case where padding is required, if an alias is encountered then a validation error is reported and skip
// may be set by the callback function so caller should merge in skip value if padding case is possible.
// This check can be skipped by passing skip_checks=true, for call sites outside the validation path.
-static bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
- bool skip_checks) {
+bool CoreChecks::RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip,
+ bool skip_checks) {
*skip = false;
auto r1_start = range1->start;
auto r1_end = range1->end;
@@ -4271,7 +4205,7 @@
return true;
}
// Simplified RangesIntersect that calls above function to check range1 for intersection with offset & end addresses
-bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
+bool CoreChecks::RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
// Create a local MEMORY_RANGE struct to wrap offset/size
MEMORY_RANGE range_wrap;
// Synch linear with range1 to avoid padding and potential validation error case
@@ -4282,9 +4216,9 @@
return RangesIntersect(dev_data, range1, &range_wrap, &tmp_bool, true);
}
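For context, the intersection helpers being moved onto CoreChecks here reduce, at their core, to an inclusive-interval overlap test; the sketch below shows only that core and deliberately omits the linear-vs-optimal padding and aliasing callbacks that the real MEMORY_RANGE code handles:

    #include <cassert>
    #include <cstdint>

    // Hypothetical simplified range; the real MEMORY_RANGE also carries handle/linear/padding state.
    struct SimpleRange {
        uint64_t start;  // first byte of the binding
        uint64_t end;    // last byte of the binding (inclusive)
    };

    // Two inclusive ranges intersect unless one ends before the other begins.
    static bool RangesOverlap(const SimpleRange &a, const SimpleRange &b) {
        return !(a.end < b.start || b.end < a.start);
    }

    int main() {
        assert(RangesOverlap({0, 255}, {128, 511}));   // partial overlap
        assert(!RangesOverlap({0, 255}, {256, 511}));  // adjacent, no overlap
        return 0;
    }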
-static bool ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
- VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
- bool is_linear, const char *api_name) {
+bool CoreChecks::ValidateInsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image,
+ bool is_linear, const char *api_name) {
bool skip = false;
MEMORY_RANGE range;
@@ -4328,8 +4262,8 @@
// Return true if an error is flagged and the user callback returns "true", otherwise false
// is_image indicates an image object, otherwise handle is for a buffer
// is_linear indicates a buffer or linear image
-static void InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
- VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
+void CoreChecks::InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
MEMORY_RANGE range;
range.image = is_image;
@@ -4362,22 +4296,22 @@
mem_info->bound_buffers.insert(handle);
}
-static bool ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
- VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
- const char *api_name) {
+bool CoreChecks::ValidateInsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
+ const char *api_name) {
return ValidateInsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear, api_name);
}
-static void InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
- VkMemoryRequirements mem_reqs, bool is_linear) {
+void CoreChecks::InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear) {
InsertMemoryRange(dev_data, HandleToUint64(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}
-static bool ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
- VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
+bool CoreChecks::ValidateInsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char *api_name) {
return ValidateInsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true, api_name);
}
-static void InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
- VkMemoryRequirements mem_reqs) {
+void CoreChecks::InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs) {
InsertMemoryRange(dev_data, HandleToUint64(buffer), mem_info, mem_offset, mem_reqs, false, true);
}
@@ -4399,12 +4333,12 @@
}
}
-void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
+void CoreChecks::RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }
-void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
+void CoreChecks::RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }
-static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
- const char *funcName, const char *msgCode) {
+bool CoreChecks::ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
+ const char *funcName, const char *msgCode) {
bool skip = false;
if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
@@ -4417,8 +4351,8 @@
return skip;
}
-bool ValidateBindBufferMemory(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
- const char *api_name) {
+bool CoreChecks::ValidateBindBufferMemory(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ const char *api_name) {
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
bool skip = false;
@@ -4436,7 +4370,8 @@
"%s: Binding memory to buffer %s but vkGetBufferMemoryRequirements() has not been called on that buffer.",
api_name, device_data->report_data->FormatHandle(buffer_handle).c_str());
// Make the call for them so we can verify the state
- device_data->dispatch_table.GetBufferMemoryRequirements(device_data->device, buffer, &buffer_state->requirements);
+ device_data->device_dispatch_table.GetBufferMemoryRequirements(device_data->device, buffer,
+ &buffer_state->requirements);
}
// Validate bound memory range information
@@ -4489,13 +4424,14 @@
return skip;
}
-bool PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
+bool CoreChecks::PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
const char *api_name = "vkBindBufferMemory()";
return ValidateBindBufferMemory(device_data, buffer, mem, memoryOffset, api_name);
}
-void UpdateBindBufferMemoryState(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
+void CoreChecks::UpdateBindBufferMemoryState(layer_data *device_data, VkBuffer buffer, VkDeviceMemory mem,
+ VkDeviceSize memoryOffset) {
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
if (buffer_state) {
// Track bound memory range information
@@ -4509,14 +4445,15 @@
}
}
-void PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
- VkResult result) {
+void CoreChecks::PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
UpdateBindBufferMemoryState(device_data, buffer, mem, memoryOffset);
}
-bool PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
+bool CoreChecks::PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
char api_name[64];
bool skip = false;
@@ -4529,7 +4466,8 @@
return skip;
}
-bool PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos) {
+bool CoreChecks::PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR *pBindInfos) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
char api_name[64];
bool skip = false;
@@ -4542,24 +4480,24 @@
return skip;
}
-void PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos,
- VkResult result) {
+void CoreChecks::PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
-void PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR *pBindInfos,
- VkResult result) {
+void CoreChecks::PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfoKHR *pBindInfos, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
for (uint32_t i = 0; i < bindInfoCount; i++) {
UpdateBindBufferMemoryState(device_data, pBindInfos[i].buffer, pBindInfos[i].memory, pBindInfos[i].memoryOffset);
}
}
-static void RecordGetBufferMemoryRequirementsState(layer_data *device_data, VkBuffer buffer,
- VkMemoryRequirements *pMemoryRequirements) {
+void CoreChecks::RecordGetBufferMemoryRequirementsState(layer_data *device_data, VkBuffer buffer,
+ VkMemoryRequirements *pMemoryRequirements) {
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
if (buffer_state) {
buffer_state->requirements = *pMemoryRequirements;
@@ -4567,24 +4505,25 @@
}
}
-void PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer,
+ VkMemoryRequirements *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetBufferMemoryRequirementsState(device_data, buffer, pMemoryRequirements);
}
-void PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
- VkMemoryRequirements2KHR *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
+ VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetBufferMemoryRequirementsState(device_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
-void PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
- VkMemoryRequirements2KHR *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR *pInfo,
+ VkMemoryRequirements2KHR *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetBufferMemoryRequirementsState(device_data, pInfo->buffer, &pMemoryRequirements->memoryRequirements);
}
-static bool ValidateGetImageMemoryRequirements2(layer_data *dev_data, const VkImageMemoryRequirementsInfo2 *pInfo) {
+bool CoreChecks::ValidateGetImageMemoryRequirements2(layer_data *dev_data, const VkImageMemoryRequirementsInfo2 *pInfo) {
bool skip = false;
if (GetDeviceExtensions(dev_data)->vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageMemoryRequirements2ANDROID(dev_data, pInfo->image);
@@ -4592,20 +4531,20 @@
return skip;
}
-bool PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
- VkMemoryRequirements2 *pMemoryRequirements) {
+bool CoreChecks::PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateGetImageMemoryRequirements2(device_data, pInfo);
}
-bool PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
- VkMemoryRequirements2 *pMemoryRequirements) {
+bool CoreChecks::PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateGetImageMemoryRequirements2(device_data, pInfo);
}
-static void RecordGetImageMemoryRequiementsState(layer_data *device_data, VkImage image,
- VkMemoryRequirements *pMemoryRequirements) {
+void CoreChecks::RecordGetImageMemoryRequiementsState(layer_data *device_data, VkImage image,
+ VkMemoryRequirements *pMemoryRequirements) {
IMAGE_STATE *image_state = GetImageState(device_data, image);
if (image_state) {
image_state->requirements = *pMemoryRequirements;
@@ -4613,19 +4552,20 @@
}
}
-void PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image,
+ VkMemoryRequirements *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetImageMemoryRequiementsState(device_data, image, pMemoryRequirements);
}
-void PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
- VkMemoryRequirements2 *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetImageMemoryRequiementsState(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
-void PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
- VkMemoryRequirements2 *pMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2 *pInfo,
+ VkMemoryRequirements2 *pMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordGetImageMemoryRequiementsState(device_data, pInfo->image, &pMemoryRequirements->memoryRequirements);
}
@@ -4638,8 +4578,9 @@
}
}
-void PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t *pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image,
+ uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements *pSparseMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto image_state = GetImageState(device_data, image);
@@ -4650,9 +4591,10 @@
}
}
-void PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
- uint32_t *pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device,
+ const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
+ uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto image_state = GetImageState(device_data, pInfo->image);
@@ -4664,9 +4606,9 @@
}
}
-void PostCallRecordGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo,
- uint32_t *pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
+void CoreChecks::PostCallRecordGetImageSparseMemoryRequirements2KHR(
+ VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR *pInfo, uint32_t *pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR *pSparseMemoryRequirements) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto image_state = GetImageState(device_data, pInfo->image);
@@ -4678,9 +4620,9 @@
}
}
-bool PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
- VkImageFormatProperties2 *pImageFormatProperties) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
+ VkImageFormatProperties2 *pImageFormatProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(instance_data->report_data, pImageFormatInfo,
@@ -4688,9 +4630,9 @@
return skip;
}
-bool PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
- VkImageFormatProperties2 *pImageFormatProperties) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2 *pImageFormatInfo,
+ VkImageFormatProperties2 *pImageFormatProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
// Can't wrap AHB-specific validation in a device extension check here, but no harm
bool skip = ValidateGetPhysicalDeviceImageFormatProperties2ANDROID(instance_data->report_data, pImageFormatInfo,
@@ -4698,13 +4640,14 @@
return skip;
}
-void PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!shaderModule) return;
device_data->shaderModuleMap.erase(shaderModule);
}
-bool PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
PIPELINE_STATE *pipeline_state = GetPipelineState(device_data, pipeline);
VK_OBJECT obj_struct = {HandleToUint64(pipeline), kVulkanObjectTypePipeline};
@@ -4717,7 +4660,7 @@
return skip;
}
-void PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!pipeline) return;
PIPELINE_STATE *pipeline_state = GetPipelineState(device_data, pipeline);
@@ -4730,13 +4673,14 @@
device_data->pipelineMap.erase(pipeline);
}
-void PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!pipelineLayout) return;
device_data->pipelineLayoutMap.erase(pipelineLayout);
}
-bool PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
SAMPLER_STATE *sampler_state = GetSamplerState(device_data, sampler);
VK_OBJECT obj_struct = {HandleToUint64(sampler), kVulkanObjectTypeSampler};
@@ -4749,7 +4693,7 @@
return skip;
}
-void PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!sampler) return;
SAMPLER_STATE *sampler_state = GetSamplerState(device_data, sampler);
@@ -4761,8 +4705,8 @@
device_data->samplerMap.erase(sampler);
}
-void PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!descriptorSetLayout) return;
auto layout_it = device_data->descriptorSetLayoutMap.find(descriptorSetLayout);
@@ -4772,8 +4716,8 @@
}
}
-bool PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
- const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(device_data, descriptorPool);
VK_OBJECT obj_struct = {HandleToUint64(descriptorPool), kVulkanObjectTypeDescriptorPool};
@@ -4786,7 +4730,8 @@
return skip;
}
-void PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!descriptorPool) return;
DESCRIPTOR_POOL_STATE *desc_pool_state = GetDescriptorPoolState(device_data, descriptorPool);
@@ -4807,8 +4752,8 @@
// If this is a secondary command buffer, then make sure its primary is also in-flight
// If primary is not in-flight, then remove secondary from global in-flight set
// This function is only valid at a point when cmdBuffer is being reset or freed
-static bool CheckCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
- const char *error_code) {
+bool CoreChecks::CheckCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
+ const char *error_code) {
bool skip = false;
if (cb_node->in_use.load()) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -4819,8 +4764,8 @@
}
// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
-static bool CheckCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
- const char *error_code) {
+bool CoreChecks::CheckCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
+ const char *error_code) {
bool skip = false;
for (auto cmd_buffer : pPool->commandBuffers) {
skip |= CheckCommandBufferInFlight(dev_data, GetCBNode(dev_data, cmd_buffer), action, error_code);
@@ -4829,8 +4774,8 @@
}
// Free all command buffers in given list, removing all references/links to them using ResetCommandBufferState
-static void FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
- const VkCommandBuffer *command_buffers) {
+void CoreChecks::FreeCommandBufferStates(layer_data *dev_data, COMMAND_POOL_NODE *pool_state, const uint32_t command_buffer_count,
+ const VkCommandBuffer *command_buffers) {
if (GetEnables(dev_data)->gpu_validation) {
GpuPreCallRecordFreeCommandBuffers(dev_data, command_buffer_count, command_buffers);
}
@@ -4849,8 +4794,8 @@
}
}
-bool PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
- const VkCommandBuffer *pCommandBuffers) {
+bool CoreChecks::PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
for (uint32_t i = 0; i < commandBufferCount; i++) {
@@ -4863,30 +4808,31 @@
return skip;
}
-void PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
- const VkCommandBuffer *pCommandBuffers) {
+void CoreChecks::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto pPool = GetCommandPoolNode(device_data, commandPool);
FreeCommandBufferStates(device_data, pPool, commandBufferCount, pCommandBuffers);
}
-bool PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
+bool CoreChecks::PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateDeviceQueueFamily(device_data, pCreateInfo->queueFamilyIndex, "vkCreateCommandPool",
"pCreateInfo->queueFamilyIndex", "VUID-vkCreateCommandPool-queueFamilyIndex-01937");
}
-void PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool, VkResult result) {
+void CoreChecks::PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkCommandPool *pCommandPool,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
device_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
device_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
}
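// A minimal sketch of the chassis call pattern these methods now plug into, assuming the generated
// intercept model; the names below (ValidationObjectSketch, layer_create_command_pool, down_chain) are
// illustrative stand-ins, not identifiers from this change. Each API call runs every registered
// validation object's PreCallValidate*/PreCallRecord* hooks, makes the down-chain call, then hands the
// VkResult to PostCallRecord*, which is why the PostCallRecord* signatures above gained a trailing
// VkResult parameter.
#include <vector>
#include <vulkan/vulkan.h>

struct ValidationObjectSketch {  // trimmed stand-in for the chassis base class
    virtual bool PreCallValidateCreateCommandPool(VkDevice, const VkCommandPoolCreateInfo *,
                                                  const VkAllocationCallbacks *, VkCommandPool *) { return false; }
    virtual void PreCallRecordCreateCommandPool(VkDevice, const VkCommandPoolCreateInfo *,
                                                const VkAllocationCallbacks *, VkCommandPool *) {}
    virtual void PostCallRecordCreateCommandPool(VkDevice, const VkCommandPoolCreateInfo *,
                                                 const VkAllocationCallbacks *, VkCommandPool *, VkResult) {}
    virtual ~ValidationObjectSketch() = default;
};

static VkResult layer_create_command_pool(std::vector<ValidationObjectSketch *> &validation_objects,
                                          PFN_vkCreateCommandPool down_chain, VkDevice device,
                                          const VkCommandPoolCreateInfo *create_info,
                                          const VkAllocationCallbacks *allocator, VkCommandPool *pool) {
    bool skip = false;
    for (auto *vo : validation_objects) skip |= vo->PreCallValidateCreateCommandPool(device, create_info, allocator, pool);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;  // validation failed; do not call down the chain
    for (auto *vo : validation_objects) vo->PreCallRecordCreateCommandPool(device, create_info, allocator, pool);
    VkResult result = down_chain(device, create_info, allocator, pool);
    for (auto *vo : validation_objects) vo->PostCallRecordCreateCommandPool(device, create_info, allocator, pool, result);
    return result;
}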
-bool PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
+bool CoreChecks::PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
@@ -4900,15 +4846,16 @@
return skip;
}
-void PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool, VkResult result) {
+void CoreChecks::PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
QUERY_POOL_NODE *qp_node = &device_data->queryPoolMap[*pQueryPool];
qp_node->createInfo = *pCreateInfo;
}
-bool PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(device_data, commandPool);
@@ -4922,7 +4869,8 @@
return skip;
}
-void PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!commandPool) return;
COMMAND_POOL_NODE *cp_state = GetCommandPoolNode(device_data, commandPool);
@@ -4936,14 +4884,15 @@
}
}
-bool PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
+bool CoreChecks::PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto command_pool_state = GetCommandPoolNode(device_data, commandPool);
return CheckCommandBuffersInFlight(device_data, command_pool_state, "reset command pool with",
"VUID-vkResetCommandPool-commandPool-00040");
}
-void PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) {
+void CoreChecks::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
// Reset all of the CBs allocated from this pool
@@ -4953,7 +4902,7 @@
}
}
-bool PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
+bool CoreChecks::PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
for (uint32_t i = 0; i < fenceCount; ++i) {
@@ -4967,7 +4916,7 @@
return skip;
}
-void PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) {
+void CoreChecks::PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
for (uint32_t i = 0; i < fenceCount; ++i) {
auto pFence = GetFenceNode(device_data, pFences[i]);
@@ -4982,7 +4931,8 @@
}
// For given cb_nodes, invalidate them and track object causing invalidation
-void InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
+void CoreChecks::InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes,
+ VK_OBJECT obj) {
for (auto cb_node : cb_nodes) {
if (cb_node->state == CB_RECORDING) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -5002,7 +4952,8 @@
}
}
-bool PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(device_data, framebuffer);
VK_OBJECT obj_struct = {HandleToUint64(framebuffer), kVulkanObjectTypeFramebuffer};
@@ -5014,7 +4965,8 @@
return skip;
}
-void PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!framebuffer) return;
FRAMEBUFFER_STATE *framebuffer_state = GetFramebufferState(device_data, framebuffer);
@@ -5023,7 +4975,8 @@
device_data->frameBufferMap.erase(framebuffer);
}
-bool PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RENDER_PASS_STATE *rp_state = GetRenderPassState(device_data, renderPass);
VK_OBJECT obj_struct = {HandleToUint64(renderPass), kVulkanObjectTypeRenderPass};
@@ -5035,7 +4988,7 @@
return skip;
}
-void PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!renderPass) return;
RENDER_PASS_STATE *rp_state = GetRenderPassState(device_data, renderPass);
@@ -5045,92 +4998,89 @@
}
// Access helper functions for external modules
-VkFormatProperties GetPDFormatProperties(const core_validation::layer_data *device_data, const VkFormat format) {
+VkFormatProperties CoreChecks::GetPDFormatProperties(const layer_data *device_data, const VkFormat format) {
VkFormatProperties format_properties;
- instance_layer_data *instance_data =
- GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
- instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &format_properties);
+ device_data->instance_dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format,
+ &format_properties);
return format_properties;
}
-VkResult GetPDImageFormatProperties(core_validation::layer_data *device_data, const VkImageCreateInfo *image_ci,
- VkImageFormatProperties *pImageFormatProperties) {
- instance_layer_data *instance_data =
- GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
- return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties(
+VkResult CoreChecks::GetPDImageFormatProperties(layer_data *device_data, const VkImageCreateInfo *image_ci,
+ VkImageFormatProperties *pImageFormatProperties) {
+ return device_data->instance_dispatch_table.GetPhysicalDeviceImageFormatProperties(
device_data->physical_device, image_ci->format, image_ci->imageType, image_ci->tiling, image_ci->usage, image_ci->flags,
pImageFormatProperties);
}
-VkResult GetPDImageFormatProperties2(core_validation::layer_data *device_data,
- const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info,
- VkImageFormatProperties2 *pImageFormatProperties) {
- if (!device_data->instance_data->extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT;
- instance_layer_data *instance_data =
- GetLayerDataPtr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
- return instance_data->dispatch_table.GetPhysicalDeviceImageFormatProperties2(device_data->physical_device,
- phys_dev_image_fmt_info, pImageFormatProperties);
+VkResult CoreChecks::GetPDImageFormatProperties2(layer_data *device_data,
+ const VkPhysicalDeviceImageFormatInfo2 *phys_dev_image_fmt_info,
+ VkImageFormatProperties2 *pImageFormatProperties) {
+ if (!device_data->instance_extensions.vk_khr_get_physical_device_properties_2) return VK_ERROR_EXTENSION_NOT_PRESENT;
+ return device_data->instance_dispatch_table.GetPhysicalDeviceImageFormatProperties2(
+ device_data->physical_device, phys_dev_image_fmt_info, pImageFormatProperties);
}
-const debug_report_data *GetReportData(const core_validation::layer_data *device_data) { return device_data->report_data; }
+const debug_report_data *CoreChecks::GetReportData(const layer_data *device_data) { return device_data->report_data; }
-const VkLayerDispatchTable *GetDispatchTable(const core_validation::layer_data *device_data) {
- return &device_data->dispatch_table;
+const VkLayerDispatchTable *CoreChecks::GetDispatchTable(const layer_data *device_data) {
+ return &device_data->device_dispatch_table;
}
-const VkPhysicalDeviceProperties *GetPDProperties(const core_validation::layer_data *device_data) {
+const VkPhysicalDeviceProperties *CoreChecks::GetPDProperties(const layer_data *device_data) {
return &device_data->phys_dev_props;
}
-const VkPhysicalDeviceMemoryProperties *GetPhysicalDeviceMemoryProperties(const core_validation::layer_data *device_data) {
+const VkPhysicalDeviceMemoryProperties *CoreChecks::GetPhysicalDeviceMemoryProperties(const layer_data *device_data) {
return &device_data->phys_dev_mem_props;
}
-const CHECK_DISABLED *GetDisables(core_validation::layer_data *device_data) { return &device_data->instance_data->disabled; }
+const CHECK_DISABLED *CoreChecks::GetDisables(layer_data *device_data) { return &device_data->instance_state->disabled; }
-const CHECK_ENABLED *GetEnables(core_validation::layer_data *device_data) { return &device_data->instance_data->enabled; }
+const CHECK_ENABLED *CoreChecks::GetEnables(layer_data *device_data) { return &device_data->instance_state->enabled; }
-std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *device_data) {
+std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *CoreChecks::GetImageMap(layer_data *device_data) {
return &device_data->imageMap;
}
-std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(core_validation::layer_data *device_data) {
+std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *CoreChecks::GetImageSubresourceMap(layer_data *device_data) {
return &device_data->imageSubresourceMap;
}
-std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *device_data) {
+std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *CoreChecks::GetImageLayoutMap(layer_data *device_data) {
return &device_data->imageLayoutMap;
}
-std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *device_data) {
+std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *CoreChecks::GetImageLayoutMap(layer_data const *device_data) {
return &device_data->imageLayoutMap;
}
-std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data) {
+std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *CoreChecks::GetBufferMap(layer_data *device_data) {
return &device_data->bufferMap;
}
-std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data) {
+std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *CoreChecks::GetBufferViewMap(layer_data *device_data) {
return &device_data->bufferViewMap;
}
-std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data) {
+std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *CoreChecks::GetImageViewMap(layer_data *device_data) {
return &device_data->imageViewMap;
}
-const DeviceFeatures *GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
+const DeviceFeatures *CoreChecks::GetEnabledFeatures(const layer_data *device_data) { return &device_data->enabled_features; }
-const DeviceExtensions *GetDeviceExtensions(const layer_data *device_data) { return &device_data->extensions; }
+const DeviceExtensions *CoreChecks::GetDeviceExtensions(const layer_data *device_data) { return &device_data->device_extensions; }
-GpuValidationState *GetGpuValidationState(layer_data *device_data) { return &device_data->gpu_validation_state; }
-const GpuValidationState *GetGpuValidationState(const layer_data *device_data) { return &device_data->gpu_validation_state; }
+GpuValidationState *CoreChecks::GetGpuValidationState(layer_data *device_data) { return &device_data->gpu_validation_state; }
+const GpuValidationState *CoreChecks::GetGpuValidationState(const layer_data *device_data) {
+ return &device_data->gpu_validation_state;
+}
-VkDevice GetDevice(const layer_data *device_data) { return device_data->device; }
+VkDevice CoreChecks::GetDevice(const layer_data *device_data) { return device_data->device; }
-uint32_t GetApiVersion(const layer_data *device_data) { return device_data->api_version; }
+uint32_t CoreChecks::GetApiVersion(const layer_data *device_data) { return device_data->api_version; }
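// Inferred only from the accessor bodies above, a sketch (not the actual declaration) of the state the
// chassis-based layer_data is assumed to carry; the point of the change is that instance-level dispatch
// tables and extension flags now sit alongside the device-level state, so the old two-map
// instance_layer_data lookup is gone.
#include <vulkan/vulkan.h>

struct layer_data_sketch {
    VkPhysicalDevice physical_device;                     // used by the GetPD*Properties wrappers
    VkDevice device;                                      // GetDevice()
    uint32_t api_version;                                 // GetApiVersion()
    VkPhysicalDeviceProperties phys_dev_props;            // GetPDProperties()
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props;  // GetPhysicalDeviceMemoryProperties()
    // Also referenced above, with project-defined types: instance_dispatch_table, device_dispatch_table,
    // instance_extensions, device_extensions, instance_state (enables/disables), report_data,
    // the image/buffer/view state maps, enabled_features, and gpu_validation_state.
};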
-void PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkFence *pFence, VkResult result) {
+void CoreChecks::PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkFence *pFence, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
auto &fence_node = device_data->fenceMap[*pFence];
@@ -5162,8 +5112,9 @@
}
}
-static bool ValidatePipelineVertexDivisors(layer_data *dev_data, vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
- const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) {
+bool CoreChecks::ValidatePipelineVertexDivisors(layer_data *dev_data,
+ std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
+ const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &(GetPDProperties(dev_data)->limits);
@@ -5236,10 +5187,10 @@
return skip;
}
-bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
- void *cgpl_state_data) {
+bool CoreChecks::PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ void *cgpl_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -5260,7 +5211,7 @@
skip |= ValidatePipelineUnlocked(device_data, cgpl_state->pipe_state, i);
}
- if (device_data->extensions.vk_ext_vertex_attribute_divisor) {
+ if (device_data->device_extensions.vk_ext_vertex_attribute_divisor) {
skip |= ValidatePipelineVertexDivisors(device_data, cgpl_state->pipe_state, count, pCreateInfos);
}
@@ -5268,9 +5219,10 @@
}
// GPU validation may replace pCreateInfos for the down-chain call
-void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines, void *cgpl_state_data) {
+void CoreChecks::PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ void *cgpl_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
cgpl_state->pCreateInfos = pCreateInfos;
@@ -5282,10 +5234,10 @@
}
}
-void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result,
- void *cgpl_state_data) {
+void CoreChecks::PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ VkResult result, void *cgpl_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
create_graphics_pipeline_api_state *cgpl_state = reinterpret_cast<create_graphics_pipeline_api_state *>(cgpl_state_data);
// This API may create pipelines regardless of the return value
@@ -5303,9 +5255,10 @@
cgpl_state->pipe_state.clear();
}
-bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines, void *pipe_state_data) {
+bool CoreChecks::PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkComputePipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ void *pipe_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
@@ -5323,9 +5276,10 @@
return skip;
}
-void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
- VkPipeline *pPipelines, VkResult result, void *pipe_state_data) {
+void CoreChecks::PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkComputePipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ VkResult result, void *pipe_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
std::vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
reinterpret_cast<std::vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
@@ -5339,10 +5293,10 @@
}
}
-bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
- void *pipe_state_data) {
+bool CoreChecks::PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ void *pipe_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
// The order of operations here is a little convoluted but gets the job done
@@ -5366,10 +5320,10 @@
return skip;
}
-void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, VkResult result,
- void *pipe_state_data) {
+void CoreChecks::PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkRayTracingPipelineCreateInfoNV *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines,
+ VkResult result, void *pipe_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
vector<std::unique_ptr<PIPELINE_STATE>> *pipe_state =
reinterpret_cast<vector<std::unique_ptr<PIPELINE_STATE>> *>(pipe_state_data);
@@ -5382,26 +5336,27 @@
}
}
-void PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkSampler *pSampler, VkResult result) {
+void CoreChecks::PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSampler *pSampler, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
device_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
}
-bool PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
+bool CoreChecks::PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorSetLayout *pSetLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (device_data->instance_data->disabled.create_descriptor_set_layout) return false;
return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(
- device_data->report_data, pCreateInfo, device_data->extensions.vk_khr_push_descriptor,
- device_data->phys_dev_ext_props.max_push_descriptors, device_data->extensions.vk_ext_descriptor_indexing,
+ device_data->report_data, pCreateInfo, device_data->device_extensions.vk_khr_push_descriptor,
+ device_data->phys_dev_ext_props.max_push_descriptors, device_data->device_extensions.vk_ext_descriptor_indexing,
&device_data->enabled_features.descriptor_indexing, &device_data->enabled_features.inline_uniform_block,
&device_data->phys_dev_ext_props.inline_uniform_block_props);
}
-void PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout,
- VkResult result) {
+void CoreChecks::PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
device_data->descriptorSetLayoutMap[*pSetLayout] =
@@ -5624,8 +5579,8 @@
return sum_by_type;
}
-bool PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
+bool CoreChecks::PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -5853,7 +5808,7 @@
device_data->phys_dev_ext_props.inline_uniform_block_props.maxDescriptorSetInlineUniformBlocks);
}
- if (device_data->extensions.vk_ext_descriptor_indexing) {
+ if (device_data->device_extensions.vk_ext_descriptor_indexing) {
// XXX TODO: replace with correct VU messages
// Max descriptors by type, within a single pipeline stage
@@ -6103,9 +6058,9 @@
return pipeline_layout_compat_dict.look_up(PipelineLayoutCompatDef(set_index, pcr_id, set_layouts_id));
}
-void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
- void *cpl_state_data) {
+void CoreChecks::PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
+ void *cpl_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
create_pipeline_layout_api_state *cpl_state = reinterpret_cast<create_pipeline_layout_api_state *>(cpl_state_data);
if (GetEnables(device_data)->gpu_validation) {
@@ -6114,9 +6069,9 @@
}
}
-void PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
- VkResult result) {
+void CoreChecks::PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Clean up GPU validation
@@ -6145,9 +6100,9 @@
}
}
-void PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool,
- VkResult result) {
+void CoreChecks::PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkDescriptorPool *pDescriptorPool,
+ VkResult result) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
@@ -6155,7 +6110,8 @@
dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
}
-bool PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
+bool CoreChecks::PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Make sure sets being destroyed are not currently in-use
if (device_data->instance_data->disabled.idle_descriptor_set) return false;
@@ -6175,8 +6131,8 @@
return skip;
}
-void PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags,
- VkResult result) {
+void CoreChecks::PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ VkDescriptorPoolResetFlags flags, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
DESCRIPTOR_POOL_STATE *pPool = GetDescriptorPoolState(device_data, descriptorPool);
@@ -6196,33 +6152,33 @@
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
-bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
- VkDescriptorSet *pDescriptorSets, void *ads_state_data) {
+bool CoreChecks::PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
+ VkDescriptorSet *pDescriptorSets, void *ads_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Always update common data
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
- cvdescriptorset::UpdateAllocateDescriptorSetsData(device_data, pAllocateInfo, ads_state);
+ UpdateAllocateDescriptorSetsData(device_data, pAllocateInfo, ads_state);
if (device_data->instance_data->disabled.allocate_descriptor_sets) return false;
// All state checks for AllocateDescriptorSets is done in single function
- return cvdescriptorset::ValidateAllocateDescriptorSets(device_data, pAllocateInfo, ads_state);
+ return ValidateAllocateDescriptorSets(device_data, pAllocateInfo, ads_state);
}
// Allocation state was good and call down chain was made so update state based on allocating descriptor sets
-void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
- VkDescriptorSet *pDescriptorSets, VkResult result, void *ads_state_data) {
+void CoreChecks::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
+ VkDescriptorSet *pDescriptorSets, VkResult result, void *ads_state_data) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
// All the updates are contained in a single cvdescriptorset function
cvdescriptorset::AllocateDescriptorSetsData *ads_state =
reinterpret_cast<cvdescriptorset::AllocateDescriptorSetsData *>(ads_state_data);
- cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state, &device_data->descriptorPoolMap,
- &device_data->setMap, device_data);
+ PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, ads_state, &device_data->descriptorPoolMap, &device_data->setMap,
+ device_data);
}
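// A minimal sketch of the per-call scratch-state threading implied by the void *ads_state_data parameter,
// assuming a chassis-style intercept; AllocateScratch, DescriptorSetChecksSketch, and
// intercept_allocate_descriptor_sets are hypothetical names, not part of this change. The scratch object
// filled during validation is handed back to the record phase after the down-chain call returns.
#include <vulkan/vulkan.h>

struct AllocateScratch {  // stand-in for cvdescriptorset::AllocateDescriptorSetsData
    uint32_t required_descriptors_by_type[32] = {};
};

struct DescriptorSetChecksSketch {  // stand-in for the CoreChecks methods shown above
    bool PreCallValidateAllocateDescriptorSets(VkDevice, const VkDescriptorSetAllocateInfo *, VkDescriptorSet *,
                                               void * /*scratch*/) { return false; }
    void PostCallRecordAllocateDescriptorSets(VkDevice, const VkDescriptorSetAllocateInfo *, VkDescriptorSet *, VkResult,
                                              void * /*scratch*/) {}
};

static VkResult intercept_allocate_descriptor_sets(DescriptorSetChecksSketch &checks,
                                                   PFN_vkAllocateDescriptorSets down_chain, VkDevice device,
                                                   const VkDescriptorSetAllocateInfo *info, VkDescriptorSet *sets) {
    AllocateScratch scratch;  // lives for exactly one API call, owned by the intercept
    if (checks.PreCallValidateAllocateDescriptorSets(device, info, sets, &scratch)) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = down_chain(device, info, sets);
    checks.PostCallRecordAllocateDescriptorSets(device, info, sets, result, &scratch);
    return result;
}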
-bool PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
- const VkDescriptorSet *pDescriptorSets) {
+bool CoreChecks::PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
+ const VkDescriptorSet *pDescriptorSets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// Make sure that no sets being destroyed are in-flight
bool skip = false;
@@ -6243,8 +6199,8 @@
return skip;
}
-void PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
- const VkDescriptorSet *pDescriptorSets) {
+void CoreChecks::PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
+ const VkDescriptorSet *pDescriptorSets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
DESCRIPTOR_POOL_STATE *pool_state = GetDescriptorPoolState(device_data, descriptorPool);
// Update available descriptor sets in pool
@@ -6266,11 +6222,11 @@
}
}
-bool PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet *pDescriptorCopies) {
+bool CoreChecks::PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet *pDescriptorCopies) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
- if (device_data->instance_data->disabled.update_descriptor_sets) return false;
+ if (device_data->disabled.update_descriptor_sets) return false;
// First thing to do is perform map look-ups.
// NOTE : UpdateDescriptorSets is somewhat unique in that it's operating on a number of DescriptorSets
// so we can't just do a single map look-up up-front, but do them individually in functions below
@@ -6278,21 +6234,20 @@
// Now make call(s) that validate state, but don't perform state updates in this function
    // Note, here DescriptorSets is unique in that we don't yet have an instance. The validation helper parses the
    // params and makes calls into the specific DescriptorSet class instances involved
- return cvdescriptorset::ValidateUpdateDescriptorSets(device_data->report_data, device_data, descriptorWriteCount,
- pDescriptorWrites, descriptorCopyCount, pDescriptorCopies,
- "vkUpdateDescriptorSets()");
+ return ValidateUpdateDescriptorSets(device_data->report_data, device_data, descriptorWriteCount, pDescriptorWrites,
+ descriptorCopyCount, pDescriptorCopies, "vkUpdateDescriptorSets()");
}
-void PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet *pDescriptorCopies) {
+void CoreChecks::PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet *pDescriptorCopies) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
cvdescriptorset::PerformUpdateDescriptorSets(device_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
-void PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
- VkCommandBuffer *pCommandBuffer, VkResult result) {
+void CoreChecks::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo,
+ VkCommandBuffer *pCommandBuffer, VkResult result) {
if (VK_SUCCESS != result) return;
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto pPool = GetCommandPoolNode(device_data, pCreateInfo->commandPool);
@@ -6311,7 +6266,7 @@
}
// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
-static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
+void CoreChecks::AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
AddCommandBufferBinding(&fb_state->cb_bindings, {HandleToUint64(fb_state->framebuffer), kVulkanObjectTypeFramebuffer},
cb_state);
@@ -6324,7 +6279,7 @@
}
}
-bool PreCallValidateBeginCommandBuffer(const VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
+bool CoreChecks::PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (!cb_state) return false;
@@ -6405,7 +6360,7 @@
return skip;
}
-void PreCallRecordBeginCommandBuffer(const VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
+void CoreChecks::PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (!cb_state) return;
@@ -6445,7 +6400,7 @@
}
}
-bool PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) {
+bool CoreChecks::PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (!cb_state) return false;
@@ -6466,7 +6421,7 @@
return skip;
}
-void PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
+void CoreChecks::PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
if (!cb_state) return;
@@ -6480,7 +6435,7 @@
}
}
-bool PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
+bool CoreChecks::PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
GLOBAL_CB_NODE *pCB = GetCBNode(device_data, commandBuffer);
@@ -6501,14 +6456,15 @@
return skip;
}
-void PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) {
+void CoreChecks::PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result) {
if (VK_SUCCESS == result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
ResetCommandBufferState(device_data, commandBuffer);
}
}
-bool PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
+bool CoreChecks::PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6521,7 +6477,8 @@
return skip;
}
-void PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
+void CoreChecks::PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6537,8 +6494,8 @@
AddCommandBufferBinding(&pipe_state->cb_bindings, {HandleToUint64(pipeline), kVulkanObjectTypePipeline}, cb_state);
}
-bool PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewport *pViewports) {
+bool CoreChecks::PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
+ const VkViewport *pViewports) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6553,16 +6510,16 @@
return skip;
}
-void PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewport *pViewports) {
+void CoreChecks::PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
+ const VkViewport *pViewports) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
cb_state->status |= CBSTATUS_VIEWPORT_SET;
}
-bool PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
- const VkRect2D *pScissors) {
+bool CoreChecks::PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
+ const VkRect2D *pScissors) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6577,16 +6534,16 @@
return skip;
}
-void PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
- const VkRect2D *pScissors) {
+void CoreChecks::PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
+ const VkRect2D *pScissors) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
cb_state->status |= CBSTATUS_SCISSOR_SET;
}
-bool PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
- uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
+bool CoreChecks::PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
+ uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6608,8 +6565,8 @@
return skip;
}
-void PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
- uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
+void CoreChecks::PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
+ uint32_t exclusiveScissorCount, const VkRect2D *pExclusiveScissors) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// XXX TODO: We don't have VUIDs for validating that all exclusive scissors have been set.
@@ -6617,7 +6574,8 @@
cb_state->status |= CBSTATUS_EXCLUSIVE_SCISSOR_SET;
}
-bool PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) {
+bool CoreChecks::PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
+ VkImageLayout imageLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6681,7 +6639,8 @@
return skip;
}
-void PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout) {
+void CoreChecks::PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
+ VkImageLayout imageLayout) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -6691,8 +6650,9 @@
}
}
-bool PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
- uint32_t viewportCount, const VkShadingRatePaletteNV *pShadingRatePalettes) {
+bool CoreChecks::PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkShadingRatePaletteNV *pShadingRatePalettes) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6729,8 +6689,9 @@
return skip;
}
-void PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkShadingRatePaletteNV *pShadingRatePalettes) {
+void CoreChecks::PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkShadingRatePaletteNV *pShadingRatePalettes) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// XXX TODO: We don't have VUIDs for validating that all shading rate palettes have been set.
@@ -6738,7 +6699,7 @@
cb_state->status |= CBSTATUS_SHADING_RATE_PALETTE_SET;
}
-bool PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
+bool CoreChecks::PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6754,14 +6715,14 @@
return skip;
}
-void PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
+void CoreChecks::PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_LINE_WIDTH_SET;
}
-bool PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
- float depthBiasSlopeFactor) {
+bool CoreChecks::PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
+ float depthBiasSlopeFactor) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6782,14 +6743,14 @@
return skip;
}
-void PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
- float depthBiasSlopeFactor) {
+void CoreChecks::PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
+ float depthBiasSlopeFactor) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_DEPTH_BIAS_SET;
}
-bool PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
+bool CoreChecks::PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6804,13 +6765,13 @@
return skip;
}
-void PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
+void CoreChecks::PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_BLEND_CONSTANTS_SET;
}
-bool PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
+bool CoreChecks::PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6825,13 +6786,14 @@
return skip;
}
-void PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
+void CoreChecks::PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_DEPTH_BOUNDS_SET;
}
-bool PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
+bool CoreChecks::PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t compareMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6846,13 +6808,15 @@
return skip;
}
-void PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
+void CoreChecks::PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t compareMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_READ_MASK_SET;
}
-bool PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
+bool CoreChecks::PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t writeMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6867,13 +6831,15 @@
return skip;
}
-void PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
+void CoreChecks::PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t writeMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
}
-bool PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
+bool CoreChecks::PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t reference) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -6888,7 +6854,8 @@
return skip;
}
-void PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
+void CoreChecks::PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
+ uint32_t reference) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->status |= CBSTATUS_STENCIL_REFERENCE_SET;
@@ -6997,10 +6964,10 @@
}
// Update the bound state for the bind point, including the effects of incompatible pipeline layouts
-void PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
- const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
- const uint32_t *pDynamicOffsets) {
+void CoreChecks::PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
+ const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
auto pipeline_layout = GetPipelineLayout(device_data, layout);
@@ -7041,10 +7008,10 @@
return skip;
}
-bool PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
- const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
- const uint32_t *pDynamicOffsets) {
+bool CoreChecks::PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
+ const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t *pDynamicOffsets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7134,8 +7101,8 @@
// Validates that the supplied bind point is supported for the command buffer (viz. the command pool)
// Takes array of error codes as some of the VUID's (e.g. vkCmdBindPipeline) are written per bindpoint
// TODO add vkCmdBindPipeline bind_point validation using this call.
-bool ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
- const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) {
+bool CoreChecks::ValidatePipelineBindPoint(layer_data *device_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
+ const char *func_name, const std::map<VkPipelineBindPoint, std::string> &bind_errors) {
bool skip = false;
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
if (pool) { // The loss of a pool in a recording cmd is reported in DestroyCommandPool
@@ -7145,7 +7112,7 @@
std::make_pair(VK_PIPELINE_BIND_POINT_RAY_TRACING_NV,
static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)),
};
- const auto &qfp = GetPhysicalDeviceState(device_data)->queue_family_properties[pool->queueFamilyIndex];
+ const auto &qfp = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex];
if (0 == (qfp.queueFlags & flag_mask.at(bind_point))) {
const std::string &error = bind_errors.at(bind_point);
auto cb_u64 = HandleToUint64(cb_state->commandBuffer);
@@ -7160,9 +7127,9 @@
return skip;
}
-bool PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites) {
+bool CoreChecks::PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7214,9 +7181,9 @@
return skip;
}
-static void RecordCmdPushDescriptorSetState(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
- VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
- uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
+void CoreChecks::RecordCmdPushDescriptorSetState(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
+ VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set,
+ uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites) {
const auto &pipeline_layout = GetPipelineLayout(device_data, layout);
// Short circuit invalid updates
if (!pipeline_layout || (set >= pipeline_layout->set_layouts.size()) || !pipeline_layout->set_layouts[set] ||
@@ -7240,9 +7207,9 @@
push_descriptor_set->PerformPushDescriptorsUpdate(descriptorWriteCount, pDescriptorWrites);
}
-void PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet *pDescriptorWrites) {
+void CoreChecks::PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet *pDescriptorWrites) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
RecordCmdPushDescriptorSetState(device_data, cb_state, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites);
@@ -7261,7 +7228,8 @@
}
}
-bool PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
+bool CoreChecks::PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkIndexType indexType) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto buffer_state = GetBufferState(device_data, buffer);
auto cb_node = GetCBNode(device_data, commandBuffer);
@@ -7287,7 +7255,8 @@
return skip;
}
-void PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
+void CoreChecks::PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkIndexType indexType) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto buffer_state = GetBufferState(device_data, buffer);
auto cb_node = GetCBNode(device_data, commandBuffer);
@@ -7301,8 +7270,8 @@
static inline void UpdateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->draw_data.push_back(pCB->current_draw_data); }
-bool PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
- const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
+bool CoreChecks::PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
+ const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7327,8 +7296,8 @@
return skip;
}
-void PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
- const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
+void CoreChecks::PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
+ const VkBuffer *pBuffers, const VkDeviceSize *pOffsets) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
@@ -7345,10 +7314,10 @@
}
// Generic function to handle validation for all CmdDraw* type functions
-static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
- CMD_TYPE cmd_type, const char *caller, VkQueueFlags queue_flags, const char *queue_flag_code,
- const char *renderpass_msg_code, const char *pipebound_msg_code,
- const char *dynamic_state_msg_code) {
+bool CoreChecks::ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
+ CMD_TYPE cmd_type, const char *caller, VkQueueFlags queue_flags, const char *queue_flag_code,
+ const char *renderpass_msg_code, const char *pipebound_msg_code,
+ const char *dynamic_state_msg_code) {
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(dev_data, cmd_buffer);
if (cb_state) {
@@ -7363,40 +7332,40 @@
}
// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
-static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
+void CoreChecks::UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateDrawState(dev_data, cb_state, bind_point);
}
// Generic function to handle state update for all CmdDraw* type functions
-static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
+void CoreChecks::UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point);
UpdateResourceTrackingOnDraw(cb_state);
cb_state->hasDrawCmd = true;
}
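
Each free function converted to a CoreChecks member in this hunk is invoked by the generated chassis trampoline for its entry point rather than by hand-written dispatch code. A minimal sketch of such a trampoline, assuming the usual chassis layout (illustrative only; the generated chassis.cpp adds per-object locking and other bookkeeping):

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    auto layer_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    bool skip = false;
    for (auto intercept : layer_data->object_dispatch) {
        // PreCallValidate* hooks contribute to the aggregated "skip" decision
        skip |= intercept->PreCallValidateCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    }
    if (skip) return;
    for (auto intercept : layer_data->object_dispatch) {
        intercept->PreCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    }
    layer_data->device_dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    for (auto intercept : layer_data->object_dispatch) {
        intercept->PostCallRecordCmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
    }
}

This is why the PreCallValidate* members return bool while the record hooks return void and keep the Vulkan signature (plus, for creation calls, the trailing VkResult).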
-bool PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance) {
+bool CoreChecks::PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
return ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAW, "vkCmdDraw()",
VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDraw-commandBuffer-cmdpool", "VUID-vkCmdDraw-renderpass",
"VUID-vkCmdDraw-None-00442", "VUID-vkCmdDraw-None-00443");
}
-void PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance) {
+void CoreChecks::PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-void PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance) {
+void CoreChecks::PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
+ uint32_t firstVertex, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-bool PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance) {
+bool CoreChecks::PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXED,
"vkCmdDrawIndexed()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndexed-commandBuffer-cmdpool",
@@ -7425,21 +7394,21 @@
return skip;
}
-void PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance) {
+void CoreChecks::PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-void PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance) {
+void CoreChecks::PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-bool PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDIRECT,
"vkCmdDrawIndirect()", VK_QUEUE_GRAPHICS_BIT, "VUID-vkCmdDrawIndirect-commandBuffer-cmdpool",
@@ -7452,14 +7421,14 @@
return skip;
}
-void PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-void PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+void CoreChecks::PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
@@ -7467,8 +7436,8 @@
AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
}
-bool PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t count, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(
device_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWINDEXEDINDIRECT, "vkCmdDrawIndexedIndirect()",
@@ -7483,14 +7452,14 @@
return skip;
}
-void PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t count, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-void PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride) {
+void CoreChecks::PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t count, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
@@ -7498,25 +7467,25 @@
AddCommandBufferBindingBuffer(device_data, cb_state, buffer_state);
}
-bool PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
+bool CoreChecks::PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
return ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCH, "vkCmdDispatch()",
VK_QUEUE_COMPUTE_BIT, "VUID-vkCmdDispatch-commandBuffer-cmdpool", "VUID-vkCmdDispatch-renderpass",
"VUID-vkCmdDispatch-None-00391", kVUIDUndefined);
}
-void PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
+void CoreChecks::PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE);
}
-void PostCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
+void CoreChecks::PostCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawDispatchType(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
}
-bool PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
+bool CoreChecks::PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip =
ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, CMD_DISPATCHINDIRECT,
@@ -7528,12 +7497,12 @@
return skip;
}
-void PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
+void CoreChecks::PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GpuAllocateValidationResources(device_data, commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE);
}
-void PostCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
+void CoreChecks::PostCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawDispatchType(device_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
@@ -7542,8 +7511,8 @@
}
// Validate that an image's sampleCount matches the requirement for a specific API call
-bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
- const char *location, const std::string &msgCode) {
+bool CoreChecks::ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
+ const char *location, const std::string &msgCode) {
bool skip = false;
if (image_state->createInfo.samples != sample_count) {
skip = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
@@ -7555,8 +7524,8 @@
return skip;
}
-bool PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize dataSize, const void *pData) {
+bool CoreChecks::PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize dataSize, const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7578,8 +7547,8 @@
return skip;
}
-void PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
- const void *pData) {
+void CoreChecks::PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize dataSize, const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
auto dst_buffer_state = GetBufferState(device_data, dstBuffer);
@@ -7588,7 +7557,7 @@
AddCommandBufferBindingBuffer(device_data, cb_state, dst_buffer_state);
}
-bool SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
+bool CoreChecks::SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
@@ -7601,7 +7570,7 @@
return false;
}
-bool PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
+bool CoreChecks::PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7615,7 +7584,7 @@
return skip;
}
-void PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
+void CoreChecks::PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
auto event_state = GetEventNode(device_data, event);
@@ -7630,7 +7599,7 @@
cb_state->eventUpdates.emplace_back([=](VkQueue q) { return SetEventStageMask(q, commandBuffer, event, stageMask); });
}
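
The eventUpdates lambda above illustrates the deferred-validation pattern several record hooks rely on: checks that depend on the submitting queue cannot be resolved at record time, so a callback is stored on the command buffer and replayed at queue submission. A rough sketch of the replay side, assuming the submit-time loop found elsewhere in core_validation (names and placement are approximate):

// At vkQueueSubmit time, for each submitted command buffer:
bool skip = false;
for (auto &event_update : cb_state->eventUpdates) {
    skip |= event_update(queue);  // e.g. runs SetEventStageMask / ValidateEventStageMask against this queue
}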
-bool PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
+bool CoreChecks::PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -7645,7 +7614,7 @@
return skip;
}
-void PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
+void CoreChecks::PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
auto event_state = GetEventNode(device_data, event);
@@ -7767,10 +7736,10 @@
}
// Verify image barrier image state and that the image is consistent with FB image
-static bool ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
- VkFramebuffer framebuffer, uint32_t active_subpass,
- const safe_VkSubpassDescription2KHR &sub_desc, uint64_t rp_handle, uint32_t img_index,
- const VkImageMemoryBarrier &img_barrier) {
+bool CoreChecks::ValidateImageBarrierImage(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE const *cb_state,
+ VkFramebuffer framebuffer, uint32_t active_subpass,
+ const safe_VkSubpassDescription2KHR &sub_desc, uint64_t rp_handle, uint32_t img_index,
+ const VkImageMemoryBarrier &img_barrier) {
bool skip = false;
const auto &fb_state = GetFramebufferState(device_data, framebuffer);
assert(fb_state);
@@ -7852,11 +7821,11 @@
}
// Validate image barriers within a renderPass
-static bool ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
- uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
- uint64_t rp_handle, const safe_VkSubpassDependency2KHR *dependencies,
- const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
- const VkImageMemoryBarrier *image_barriers) {
+bool CoreChecks::ValidateRenderPassImageBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
+ uint32_t active_subpass, const safe_VkSubpassDescription2KHR &sub_desc,
+ uint64_t rp_handle, const safe_VkSubpassDependency2KHR *dependencies,
+ const std::vector<uint32_t> &self_dependencies, uint32_t image_mem_barrier_count,
+ const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
for (uint32_t i = 0; i < image_mem_barrier_count; ++i) {
const auto &img_barrier = image_barriers[i];
@@ -7913,12 +7882,12 @@
// Validate VUs for Pipeline Barriers that are within a renderPass
// Pre: cb_state->activeRenderPass must be a pointer to valid renderPass state
-static bool ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
- VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
- VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
- const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
- const VkBufferMemoryBarrier *buffer_mem_barriers, uint32_t image_mem_barrier_count,
- const VkImageMemoryBarrier *image_barriers) {
+bool CoreChecks::ValidateRenderPassPipelineBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
+ VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
+ VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
+ const VkMemoryBarrier *mem_barriers, uint32_t buffer_mem_barrier_count,
+ const VkBufferMemoryBarrier *buffer_mem_barriers,
+ uint32_t image_mem_barrier_count, const VkImageMemoryBarrier *image_barriers) {
bool skip = false;
const auto rp_state = cb_state->activeRenderPass;
const auto active_subpass = cb_state->activeSubpass;
@@ -7937,8 +7906,8 @@
bool stage_mask_match = false;
for (const auto self_dep_index : self_dependencies) {
const auto &sub_dep = dependencies[self_dep_index];
- const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_data->extensions, sub_dep.srcStageMask);
- const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_data->extensions, sub_dep.dstStageMask);
+ const auto &sub_src_stage_mask = ExpandPipelineStageFlags(device_data->device_extensions, sub_dep.srcStageMask);
+ const auto &sub_dst_stage_mask = ExpandPipelineStageFlags(device_data->device_extensions, sub_dep.dstStageMask);
stage_mask_match = ((sub_src_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
(src_stage_mask == (sub_src_stage_mask & src_stage_mask))) &&
((sub_dst_stage_mask == VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) ||
@@ -8170,8 +8139,8 @@
sharing_mode_(sharing_mode),
object_type_(object_type),
val_codes_(val_codes),
- limit_(static_cast<uint32_t>(GetPhysicalDeviceState(device_data)->queue_family_properties.size())),
- mem_ext_(device_data->extensions.vk_khr_external_memory) {}
+ limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
+ mem_ext_(device_data->device_extensions.vk_khr_external_memory) {}
// Create a validator state from an image state... reducing the image-specific state to the generic version.
ValidatorState(const layer_data *device_data, const char *func_name, const GLOBAL_CB_NODE *cb_state,
@@ -8361,21 +8330,21 @@
return barrier_queue_families::Validate(device_data, func_name, cb_state, val, src_queue_family, dst_queue_family);
}
-static bool ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
- VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
- const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
- const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
- const VkImageMemoryBarrier *pImageMemBarriers) {
+bool CoreChecks::ValidateBarriers(layer_data *device_data, const char *funcName, GLOBAL_CB_NODE *cb_state,
+ VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
+ uint32_t memBarrierCount, const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
+ const VkImageMemoryBarrier *pImageMemBarriers) {
bool skip = false;
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier.srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier.srcAccessMask, src_stage_mask);
}
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier.dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
@@ -8384,13 +8353,13 @@
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
auto mem_barrier = &pImageMemBarriers[i];
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pImageMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier->srcAccessMask, src_stage_mask);
}
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pImageMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
@@ -8426,13 +8395,13 @@
auto mem_barrier = &pBufferMemBarriers[i];
if (!mem_barrier) continue;
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->srcAccessMask, src_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01184",
"%s: pBufferMemBarriers[%d].srcAccessMask (0x%X) is not supported by srcStageMask (0x%X).", funcName, i,
mem_barrier->srcAccessMask, src_stage_mask);
}
- if (!ValidateAccessMaskPipelineStage(device_data->extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
+ if (!ValidateAccessMaskPipelineStage(device_data->device_extensions, mem_barrier->dstAccessMask, dst_stage_mask)) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
HandleToUint64(cb_state->commandBuffer), "VUID-vkCmdPipelineBarrier-pMemoryBarriers-01185",
"%s: pBufferMemBarriers[%d].dstAccessMask (0x%X) is not supported by dstStageMask (0x%X).", funcName, i,
@@ -8475,8 +8444,8 @@
return skip;
}
-bool ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
- VkPipelineStageFlags sourceStageMask) {
+bool CoreChecks::ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
+ VkPipelineStageFlags sourceStageMask) {
bool skip = false;
VkPipelineStageFlags stageMask = 0;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
@@ -8544,9 +8513,9 @@
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};
-bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
- VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
- const char *error_code) {
+bool CoreChecks::CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer,
+ VkPipelineStageFlags stage_mask, VkQueueFlags queue_flags, const char *function,
+ const char *src_or_dest, const char *error_code) {
bool skip = false;
// Look up each bit in the stage mask and check for overlap between its table bits and queue_flags
for (const auto &item : stage_flag_bit_array) {
@@ -8564,7 +8533,7 @@
// Check if all barriers are of a given operation type.
template <typename Barrier, typename OpCheck>
-static bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
+bool AllTransferOp(const COMMAND_POOL_NODE *pool, OpCheck &op_check, uint32_t count, const Barrier *barriers) {
if (!pool) return false;
for (uint32_t b = 0; b < count; b++) {
@@ -8573,24 +8542,20 @@
return true;
}
-enum BarrierOperationsType {
- kAllAcquire, // All Barrier operations are "ownership acquire" operations
- kAllRelease, // All Barrier operations are "ownership release" operations
- kGeneral, // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
-};
-
// Look at the barriers to see if they are all release or all acquire; the result impacts queue properties validation
-BarrierOperationsType ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state, uint32_t buffer_barrier_count,
- const VkBufferMemoryBarrier *buffer_barriers, uint32_t image_barrier_count,
- const VkImageMemoryBarrier *image_barriers) {
+BarrierOperationsType CoreChecks::ComputeBarrierOperationsType(layer_data *device_data, GLOBAL_CB_NODE *cb_state,
+ uint32_t buffer_barrier_count,
+ const VkBufferMemoryBarrier *buffer_barriers,
+ uint32_t image_barrier_count,
+ const VkImageMemoryBarrier *image_barriers) {
auto pool = GetCommandPoolNode(device_data, cb_state->createInfo.commandPool);
BarrierOperationsType op_type = kGeneral;
// Look at the barrier details only if they exist
// Note: AllTransferOp returns true for count == 0
if ((buffer_barrier_count + image_barrier_count) != 0) {
- if (AllTransferOp(pool, IsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
- AllTransferOp(pool, IsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
+ if (AllTransferOp(pool, TempIsReleaseOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
+ AllTransferOp(pool, TempIsReleaseOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
op_type = kAllRelease;
} else if (AllTransferOp(pool, IsAcquireOp<VkBufferMemoryBarrier>, buffer_barrier_count, buffer_barriers) &&
AllTransferOp(pool, IsAcquireOp<VkImageMemoryBarrier>, image_barrier_count, image_barriers)) {
@@ -8601,14 +8566,14 @@
return op_type;
}
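
The classification above only depends on the direction of queue family ownership transfer relative to the pool that recorded the barrier. A hedged sketch of predicates with the shape AllTransferOp expects (the real TempIsReleaseOp/IsAcquireOp templates live in the headers; these names are illustrative):

template <typename Barrier>
bool IsOwnershipTransferSketch(const Barrier *barrier) {
    return barrier->srcQueueFamilyIndex != barrier->dstQueueFamilyIndex;
}

// The releasing barrier is recorded on the family that currently owns the resource...
template <typename Barrier>
bool IsReleaseOpSketch(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
    return IsOwnershipTransferSketch(barrier) && (pool->queueFamilyIndex == barrier->srcQueueFamilyIndex);
}

// ...and the matching acquire is recorded on the destination family.
template <typename Barrier>
bool IsAcquireOpSketch(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
    return IsOwnershipTransferSketch(barrier) && (pool->queueFamilyIndex == barrier->dstQueueFamilyIndex);
}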
-bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
- VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
- BarrierOperationsType barrier_op_type, const char *function,
- const char *error_code) {
+bool CoreChecks::ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE const *cb_state,
+ VkPipelineStageFlags source_stage_mask,
+ VkPipelineStageFlags dest_stage_mask,
+ BarrierOperationsType barrier_op_type, const char *function,
+ const char *error_code) {
bool skip = false;
uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, dev_data->physical_device);
+ auto physical_device_state = GetPhysicalDeviceState();
// Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
// specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
@@ -8631,11 +8596,11 @@
return skip;
}
-bool PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+bool CoreChecks::PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -8660,11 +8625,11 @@
return skip;
}
-void PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+void CoreChecks::PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
auto first_event_index = cb_state->events.size();
@@ -8685,22 +8650,24 @@
}
}
-void PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+void CoreChecks::PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
RecordBarriersQFOTransfers(device_data, cb_state, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount,
pImageMemoryBarriers);
}
-bool PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+bool CoreChecks::PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -8736,11 +8703,13 @@
return skip;
}
-void PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
+void CoreChecks::PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8749,7 +8718,7 @@
TransitionImageLayouts(device_data, cb_state, imageMemoryBarrierCount, pImageMemoryBarriers);
}
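
Once the barrier is validated, the record hook only has to fold the declared layout transitions into the per-command-buffer layout map so later commands are checked against the post-barrier layouts. Roughly (a sketch assuming a per-subresource-range setter; TransitionImageLayouts also expands VK_REMAINING_* counts and aspect masks, omitted here):

for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
    const VkImageMemoryBarrier &barrier = pImageMemoryBarriers[i];
    // Record newLayout for every subresource the barrier names; the hypothetical
    // SetTrackedImageLayout stands in for the layer's real layout-map helper.
    SetTrackedImageLayout(cb_state, barrier.image, barrier.subresourceRange, barrier.newLayout);
}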
-static bool SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
+bool CoreChecks::SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *pCB = GetCBNode(dev_data, commandBuffer);
if (pCB) {
@@ -8762,7 +8731,7 @@
return false;
}
-bool PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
+bool CoreChecks::PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -8790,7 +8759,7 @@
return skip;
}
-void PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
+void CoreChecks::PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
QueryObject query = {queryPool, slot};
@@ -8800,7 +8769,7 @@
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
-bool PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
+bool CoreChecks::PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
QueryObject query = {queryPool, slot};
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8818,7 +8787,7 @@
return skip;
}
-void PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
+void CoreChecks::PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
QueryObject query = {queryPool, slot};
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8828,8 +8797,8 @@
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
-bool PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount) {
+bool CoreChecks::PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8840,8 +8809,8 @@
return skip;
}
-void PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount) {
+void CoreChecks::PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8854,7 +8823,7 @@
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
-static bool IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
+bool CoreChecks::IsQueryInvalid(layer_data *dev_data, QUEUE_STATE *queue_data, VkQueryPool queryPool, uint32_t queryIndex) {
QueryObject query = {queryPool, queryIndex};
auto query_data = queue_data->queryToStateMap.find(query);
if (query_data != queue_data->queryToStateMap.end()) {
@@ -8867,7 +8836,8 @@
return false;
}
-static bool ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
+bool CoreChecks::ValidateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount) {
bool skip = false;
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
auto queue_data = GetQueueState(dev_data, queue);
@@ -8883,9 +8853,9 @@
return skip;
}
-bool PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
- VkQueryResultFlags flags) {
+bool CoreChecks::PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize stride, VkQueryResultFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
auto dst_buff_state = GetBufferState(device_data, dstBuffer);
@@ -8905,9 +8875,9 @@
return skip;
}
-void PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
- VkQueryResultFlags flags) {
+void CoreChecks::PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize stride, VkQueryResultFlags flags) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
auto cb_state = GetCBNode(device_data, commandBuffer);
auto dst_buff_state = GetBufferState(device_data, dstBuffer);
@@ -8917,8 +8887,9 @@
{HandleToUint64(queryPool), kVulkanObjectTypeQueryPool}, cb_state);
}
-bool PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
- uint32_t offset, uint32_t size, const void *pValues) {
+bool CoreChecks::PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
+ const void *pValues) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -8972,8 +8943,8 @@
return skip;
}
-bool PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool,
- uint32_t slot) {
+bool CoreChecks::PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool, uint32_t slot) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -8984,16 +8955,16 @@
return skip;
}
-void PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool,
- uint32_t slot) {
+void CoreChecks::PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool, uint32_t slot) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
QueryObject query = {queryPool, slot};
cb_state->queryUpdates.emplace_back([=](VkQueue q) { return SetQueryState(q, commandBuffer, query, true); });
}
-static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference2KHR *attachments,
- const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, const char *error_code) {
+bool CoreChecks::MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference2KHR *attachments,
+ const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag, const char *error_code) {
bool skip = false;
for (uint32_t attach = 0; attach < count; attach++) {
@@ -9029,7 +9000,7 @@
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
-static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
+bool CoreChecks::ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
bool skip = false;
auto rp_state = GetRenderPassState(dev_data, pCreateInfo->renderPass);
@@ -9167,8 +9138,8 @@
return skip;
}
-bool PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
+bool CoreChecks::PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
// TODO : Verify that the renderPass the FB was created with is compatible with this FB
bool skip = false;
@@ -9176,8 +9147,9 @@
return skip;
}
-void PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer, VkResult result) {
+void CoreChecks::PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
// Shadow create info and store in map
@@ -9217,9 +9189,9 @@
return false;
}
-static bool CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
- const std::vector<uint32_t> &dependent_subpasses, const std::vector<DAGNode> &subpass_to_node,
- bool &skip) {
+bool CoreChecks::CheckDependencyExists(const layer_data *dev_data, const uint32_t subpass,
+ const std::vector<uint32_t> &dependent_subpasses,
+ const std::vector<DAGNode> &subpass_to_node, bool &skip) {
bool result = true;
// Loop through all subpasses that share the same attachment and make sure a dependency exists
for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
@@ -9244,8 +9216,8 @@
return result;
}
-static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index,
- const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
+bool CoreChecks::CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo2KHR *pCreateInfo, const int index,
+ const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip) {
const DAGNode &node = subpass_to_node[index];
// If this node writes to the attachment return true as next nodes need to preserve the attachment.
const VkSubpassDescription2KHR &subpass = pCreateInfo->pSubpasses[index];
@@ -9292,8 +9264,8 @@
IsRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
-static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
- RENDER_PASS_STATE const *renderPass) {
+bool CoreChecks::ValidateDependencies(layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
+ RENDER_PASS_STATE const *renderPass) {
bool skip = false;
auto const pFramebufferInfo = framebuffer->createInfo.ptr();
auto const pCreateInfo = renderPass->createInfo.ptr();
@@ -9450,7 +9422,7 @@
const VkSubpassDependency2KHR &dependency = pCreateInfo->pDependencies[i];
VkPipelineStageFlags exclude_graphics_pipeline_stages =
~(VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
- ExpandPipelineStageFlags(dev_data->extensions, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT));
+ ExpandPipelineStageFlags(dev_data->device_extensions, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT));
VkPipelineStageFlagBits latest_src_stage = GetLogicallyLatestGraphicsPipelineStage(dependency.srcStageMask);
VkPipelineStageFlagBits earliest_dst_stage = GetLogicallyEarliestGraphicsPipelineStage(dependency.dstStageMask);
@@ -9575,8 +9547,8 @@
return skip;
}
-static bool ValidateAttachmentIndex(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t attachment,
- uint32_t attachment_count, const char *type) {
+bool CoreChecks::ValidateAttachmentIndex(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t attachment,
+ uint32_t attachment_count, const char *type) {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *const function_name = use_rp2 ? "vkCreateRenderPass2KHR()" : "vkCreateRenderPass()";
@@ -9616,9 +9588,9 @@
}
}
-static bool AddAttachmentUse(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t subpass,
- std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts,
- uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) {
+bool CoreChecks::AddAttachmentUse(const layer_data *dev_data, RenderPassCreateVersion rp_version, uint32_t subpass,
+ std::vector<uint8_t> &attachment_uses, std::vector<VkImageLayout> &attachment_layouts,
+ uint32_t attachment, uint8_t new_use, VkImageLayout new_layout) {
if (attachment >= attachment_uses.size()) return false; /* out of range, but already reported */
bool skip = false;
@@ -9649,8 +9621,8 @@
return skip;
}
-static bool ValidateRenderpassAttachmentUsage(const layer_data *dev_data, RenderPassCreateVersion rp_version,
- const VkRenderPassCreateInfo2KHR *pCreateInfo) {
+bool CoreChecks::ValidateRenderpassAttachmentUsage(const layer_data *dev_data, RenderPassCreateVersion rp_version,
+ const VkRenderPassCreateInfo2KHR *pCreateInfo) {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
@@ -9820,7 +9792,7 @@
const auto depth_stencil_sample_count =
pCreateInfo->pAttachments[subpass.pDepthStencilAttachment->attachment].samples;
- if (dev_data->extensions.vk_amd_mixed_attachment_samples) {
+ if (dev_data->device_extensions.vk_amd_mixed_attachment_samples) {
if (pCreateInfo->pAttachments[attachment_ref.attachment].samples > depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pColorAttachments-03070"
: "VUID-VkSubpassDescription-pColorAttachments-01506";
@@ -9836,8 +9808,8 @@
}
}
- if (!dev_data->extensions.vk_amd_mixed_attachment_samples &&
- !dev_data->extensions.vk_nv_framebuffer_mixed_samples &&
+ if (!dev_data->device_extensions.vk_amd_mixed_attachment_samples &&
+ !dev_data->device_extensions.vk_nv_framebuffer_mixed_samples &&
current_sample_count != depth_stencil_sample_count) {
vuid = use_rp2 ? "VUID-VkSubpassDescription2KHR-pDepthStencilAttachment-03071"
: "VUID-VkSubpassDescription-pDepthStencilAttachment-01418";
@@ -9888,8 +9860,8 @@
if (!render_pass->attachment_first_read.count(index)) render_pass->attachment_first_read[index] = is_read;
}
-static bool ValidateCreateRenderPass(const layer_data *dev_data, VkDevice device, RenderPassCreateVersion rp_version,
- const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) {
+bool CoreChecks::ValidateCreateRenderPass(layer_data *dev_data, VkDevice device, RenderPassCreateVersion rp_version,
+ const VkRenderPassCreateInfo2KHR *pCreateInfo, RENDER_PASS_STATE *render_pass) {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *vuid;
@@ -9971,14 +9943,14 @@
"VUID-VkSubpassDependency-dstStageMask-02102");
}
- if (!ValidateAccessMaskPipelineStage(dev_data->extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
+ if (!ValidateAccessMaskPipelineStage(dev_data->device_extensions, dependency.srcAccessMask, dependency.srcStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-srcAccessMask-03088" : "VUID-VkSubpassDependency-srcAccessMask-00868";
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].srcAccessMask (0x%" PRIx32 ") is not supported by srcStageMask (0x%" PRIx32 ").",
function_name, i, dependency.srcAccessMask, dependency.srcStageMask);
}
- if (!ValidateAccessMaskPipelineStage(dev_data->extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
+ if (!ValidateAccessMaskPipelineStage(dev_data->device_extensions, dependency.dstAccessMask, dependency.dstStageMask)) {
vuid = use_rp2 ? "VUID-VkSubpassDependency2KHR-dstAccessMask-03089" : "VUID-VkSubpassDependency-dstAccessMask-00869";
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, vuid,
"%s: pDependencies[%u].dstAccessMask (0x%" PRIx32 ") is not supported by dstStageMask (0x%" PRIx32 ").",
@@ -9991,8 +9963,8 @@
return skip;
}
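
The call sites renamed from extensions to device_extensions above all feed ValidateAccessMaskPipelineStage, whose behavior is unchanged by this diff: every access bit in the mask must be usable by at least one stage in the (extension-expanded) stage mask. A compressed sketch of that idea, with a deliberately abridged, hypothetical table:

struct AccessStagePair { VkAccessFlagBits access; VkPipelineStageFlags stages; };
static const AccessStagePair kAllowedStagesSketch[] = {
    {VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},
    {VK_ACCESS_INDEX_READ_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},
    // ... remaining access bits elided; the real table is extension-aware.
};

bool AccessMaskSupportedByStagesSketch(VkAccessFlags access_mask, VkPipelineStageFlags stage_mask) {
    for (const auto &entry : kAllowedStagesSketch) {
        // Fail if an access bit is requested but none of its allowed stages appear in the stage mask.
        if ((access_mask & entry.access) && ((stage_mask & entry.stages) == 0)) return false;
    }
    return true;
}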
-bool PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
+bool CoreChecks::PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -10075,16 +10047,18 @@
// Use of rvalue reference exceeds recommended usage of rvalue refs in the Google style guide, but intentionally forces caller to move
// or copy. This is clearer than passing a pointer to shared_ptr and avoids the atomic increment/decrement of shared_ptr copy
// construction or assignment.
-void PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, VkResult result) {
+void CoreChecks::PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
RecordCreateRenderPassState(device_data, RENDER_PASS_VERSION_1, render_pass_state, pRenderPass);
}
-void PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass, VkResult result) {
+void CoreChecks::PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
auto render_pass_state = std::make_shared<RENDER_PASS_STATE>(pCreateInfo);
@@ -10219,8 +10193,8 @@
return skip;
}
-bool PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
+bool CoreChecks::PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -10235,8 +10209,8 @@
return skip;
}
-static bool ValidatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
- const char *error_code) {
+bool CoreChecks::ValidatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, char const *cmd_name,
+ const char *error_code) {
bool skip = false;
if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
@@ -10246,7 +10220,7 @@
return skip;
}
-static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
+bool CoreChecks::VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
bool skip = false;
const safe_VkFramebufferCreateInfo *pFramebufferInfo =
&GetFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
@@ -10279,8 +10253,8 @@
return ((check_color_depth_load_op && (color_depth_op == op)) || (check_stencil_load_op && (stencil_op == op)));
}
-static bool ValidateCmdBeginRenderPass(layer_data *device_data, VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
- const VkRenderPassBeginInfo *pRenderPassBegin) {
+bool CoreChecks::ValidateCmdBeginRenderPass(layer_data *device_data, VkCommandBuffer commandBuffer,
+ RenderPassCreateVersion rp_version, const VkRenderPassBeginInfo *pRenderPassBegin) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(device_data, pRenderPassBegin->renderPass) : nullptr;
@@ -10370,22 +10344,22 @@
return skip;
}
-bool PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
- VkSubpassContents contents) {
+bool CoreChecks::PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
+ VkSubpassContents contents) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdBeginRenderPass(device_data, commandBuffer, RENDER_PASS_VERSION_1, pRenderPassBegin);
return skip;
}
-bool PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
+bool CoreChecks::PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdBeginRenderPass(device_data, commandBuffer, RENDER_PASS_VERSION_2, pRenderPassBegin);
return skip;
}
-static void RecordCmdBeginRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer,
- const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) {
+void CoreChecks::RecordCmdBeginRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo *pRenderPassBegin, const VkSubpassContents contents) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
auto render_pass_state = pRenderPassBegin ? GetRenderPassState(device_data, pRenderPassBegin->renderPass) : nullptr;
auto framebuffer = pRenderPassBegin ? GetFramebufferState(device_data, pRenderPassBegin->framebuffer) : nullptr;
@@ -10408,19 +10382,20 @@
}
}
-void PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
- VkSubpassContents contents) {
+void CoreChecks::PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
+ VkSubpassContents contents) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdBeginRenderPassState(device_data, commandBuffer, pRenderPassBegin, contents);
}
-void PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
- const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
+void CoreChecks::PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin,
+ const VkSubpassBeginInfoKHR *pSubpassBeginInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdBeginRenderPassState(device_data, commandBuffer, pRenderPassBegin, pSubpassBeginInfo->contents);
}
-static bool ValidateCmdNextSubpass(layer_data *device_data, RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) {
+bool CoreChecks::ValidateCmdNextSubpass(layer_data *device_data, RenderPassCreateVersion rp_version,
+ VkCommandBuffer commandBuffer) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
bool skip = false;
@@ -10448,18 +10423,18 @@
return skip;
}
-bool PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
+bool CoreChecks::PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
return ValidateCmdNextSubpass(device_data, RENDER_PASS_VERSION_1, commandBuffer);
}
-bool PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
- const VkSubpassEndInfoKHR *pSubpassEndInfo) {
+bool CoreChecks::PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
return ValidateCmdNextSubpass(device_data, RENDER_PASS_VERSION_2, commandBuffer);
}
-static void RecordCmdNextSubpass(layer_data *device_data, VkCommandBuffer commandBuffer, VkSubpassContents contents) {
+void CoreChecks::RecordCmdNextSubpass(layer_data *device_data, VkCommandBuffer commandBuffer, VkSubpassContents contents) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
cb_state->activeSubpass++;
cb_state->activeSubpassContents = contents;
@@ -10467,18 +10442,19 @@
GetFramebufferState(device_data, cb_state->activeRenderPassBeginInfo.framebuffer));
}
-void PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
+void CoreChecks::PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdNextSubpass(device_data, commandBuffer, contents);
}
-void PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
- const VkSubpassEndInfoKHR *pSubpassEndInfo) {
+void CoreChecks::PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR *pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR *pSubpassEndInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdNextSubpass(device_data, commandBuffer, pSubpassBeginInfo->contents);
}
-static bool ValidateCmdEndRenderPass(layer_data *device_data, RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer) {
+bool CoreChecks::ValidateCmdEndRenderPass(layer_data *device_data, RenderPassCreateVersion rp_version,
+ VkCommandBuffer commandBuffer) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
bool skip = false;
@@ -10509,19 +10485,19 @@
return skip;
}
-bool PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) {
+bool CoreChecks::PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdEndRenderPass(device_data, RENDER_PASS_VERSION_1, commandBuffer);
return skip;
}
-bool PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
+bool CoreChecks::PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdEndRenderPass(device_data, RENDER_PASS_VERSION_2, commandBuffer);
return skip;
}
-static void RecordCmdEndRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer) {
+void CoreChecks::RecordCmdEndRenderPassState(layer_data *device_data, VkCommandBuffer commandBuffer) {
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
FRAMEBUFFER_STATE *framebuffer = GetFramebufferState(device_data, cb_state->activeFramebuffer);
TransitionFinalSubpassLayouts(device_data, cb_state, &cb_state->activeRenderPassBeginInfo, framebuffer);
@@ -10530,18 +10506,18 @@
cb_state->activeFramebuffer = VK_NULL_HANDLE;
}
-void PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
+void CoreChecks::PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdEndRenderPassState(device_data, commandBuffer);
}
-void PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
+void CoreChecks::PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR *pSubpassEndInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
RecordCmdEndRenderPassState(device_data, commandBuffer);
}
-static bool ValidateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
- VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
+bool CoreChecks::ValidateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
+ VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB, const char *caller) {
bool skip = false;
if (!pSubCB->beginInfo.pInheritanceInfo) {
return skip;
@@ -10571,7 +10547,7 @@
return skip;
}
-static bool ValidateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
+bool CoreChecks::ValidateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
bool skip = false;
unordered_set<int> activeTypes;
for (auto queryObject : pCB->activeQueries) {
@@ -10621,8 +10597,8 @@
return skip;
}
-bool PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
- const VkCommandBuffer *pCommandBuffers) {
+bool CoreChecks::PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
+ const VkCommandBuffer *pCommandBuffers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -10754,8 +10730,8 @@
return skip;
}
-void PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
- const VkCommandBuffer *pCommandBuffers) {
+void CoreChecks::PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
+ const VkCommandBuffer *pCommandBuffers) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -10801,8 +10777,8 @@
}
}
-bool PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
- void **ppData) {
+bool CoreChecks::PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
+ VkFlags flags, void **ppData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
DEVICE_MEM_INFO *mem_info = GetMemObjInfo(device_data, mem);
@@ -10821,8 +10797,8 @@
return skip;
}
-void PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
- void **ppData, VkResult result) {
+void CoreChecks::PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
+ void **ppData, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
// TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
@@ -10830,7 +10806,7 @@
InitializeAndTrackMemory(device_data, mem, offset, size, ppData);
}
-bool PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) {
+bool CoreChecks::PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto mem_info = GetMemObjInfo(device_data, mem);
@@ -10844,7 +10820,7 @@
return skip;
}
-void PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
+void CoreChecks::PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto mem_info = GetMemObjInfo(device_data, mem);
mem_info->mem_range.size = 0;
@@ -10855,8 +10831,8 @@
}
}
-static bool ValidateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
- const VkMappedMemoryRange *pMemRanges) {
+bool CoreChecks::ValidateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges) {
bool skip = false;
for (uint32_t i = 0; i < memRangeCount; ++i) {
auto mem_info = GetMemObjInfo(dev_data, pMemRanges[i].memory);
@@ -10890,8 +10866,8 @@
return skip;
}
-static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
- const VkMappedMemoryRange *mem_ranges) {
+bool CoreChecks::ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
+ const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
@@ -10924,7 +10900,8 @@
return skip;
}
-static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
+void CoreChecks::CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count,
+ const VkMappedMemoryRange *mem_ranges) {
for (uint32_t i = 0; i < mem_range_count; ++i) {
auto mem_info = GetMemObjInfo(dev_data, mem_ranges[i].memory);
if (mem_info && mem_info->shadow_copy) {
@@ -10937,8 +10914,8 @@
}
}
-static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
- const VkMappedMemoryRange *mem_ranges) {
+bool CoreChecks::ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
+ const VkMappedMemoryRange *mem_ranges) {
bool skip = false;
for (uint32_t i = 0; i < mem_range_count; ++i) {
uint64_t atom_size = dev_data->phys_dev_props.limits.nonCoherentAtomSize;
@@ -10963,7 +10940,8 @@
return skip;
}
-bool PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
+bool CoreChecks::PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits(device_data, "vkFlushMappedMemoryRanges", memRangeCount, pMemRanges);
@@ -10972,7 +10950,8 @@
return skip;
}
-bool PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges) {
+bool CoreChecks::PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
skip |= ValidateMappedMemoryRangeDeviceLimits(device_data, "vkInvalidateMappedMemoryRanges", memRangeCount, pMemRanges);
@@ -10980,8 +10959,8 @@
return skip;
}
-void PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange *pMemRanges,
- VkResult result) {
+void CoreChecks::PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
+ const VkMappedMemoryRange *pMemRanges, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS == result) {
// Update our shadow copy with modified driver data
@@ -10989,7 +10968,7 @@
}
}
-bool PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) {
+bool CoreChecks::PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize *pCommittedMem) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto mem_info = GetMemObjInfo(dev_data, mem);
@@ -11006,8 +10985,8 @@
return skip;
}
-bool ValidateBindImageMemory(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
- const char *api_name) {
+bool CoreChecks::ValidateBindImageMemory(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ const char *api_name) {
bool skip = false;
IMAGE_STATE *image_state = GetImageState(device_data, image);
if (image_state) {
@@ -11023,7 +11002,7 @@
"%s: Binding memory to image %s but vkGetImageMemoryRequirements() has not been called on that image.",
api_name, device_data->report_data->FormatHandle(image_handle).c_str());
// Make the call for them so we can verify the state
- device_data->dispatch_table.GetImageMemoryRequirements(device_data->device, image, &image_state->requirements);
+ device_data->device_dispatch_table.GetImageMemoryRequirements(device_data->device, image, &image_state->requirements);
}
// Validate bound memory range information
@@ -11076,12 +11055,12 @@
return skip;
}
-bool PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
+bool CoreChecks::PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateBindImageMemory(device_data, image, mem, memoryOffset, "vkBindImageMemory()");
}
-void UpdateBindImageMemoryState(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
+void CoreChecks::UpdateBindImageMemoryState(layer_data *device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
IMAGE_STATE *image_state = GetImageState(device_data, image);
if (image_state) {
// Track bound memory range information
@@ -11097,13 +11076,15 @@
}
}
-void PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result) {
+void CoreChecks::PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
UpdateBindImageMemoryState(device_data, image, mem, memoryOffset);
}
-bool PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
+bool CoreChecks::PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount,
+ const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
char api_name[128];
@@ -11115,7 +11096,8 @@
return skip;
}
-bool PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos) {
+bool CoreChecks::PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
+ const VkBindImageMemoryInfoKHR *pBindInfos) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
char api_name[128];
@@ -11127,8 +11109,8 @@
return skip;
}
-void PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos,
- VkResult result) {
+void CoreChecks::PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
for (uint32_t i = 0; i < bindInfoCount; i++) {
@@ -11136,8 +11118,8 @@
}
}
-void PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR *pBindInfos,
- VkResult result) {
+void CoreChecks::PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
+ const VkBindImageMemoryInfoKHR *pBindInfos, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
for (uint32_t i = 0; i < bindInfoCount; i++) {
@@ -11145,7 +11127,7 @@
}
}
-bool PreCallValidateSetEvent(VkDevice device, VkEvent event) {
+bool CoreChecks::PreCallValidateSetEvent(VkDevice device, VkEvent event) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto event_state = GetEventNode(device_data, event);
@@ -11160,7 +11142,7 @@
return skip;
}
-void PreCallRecordSetEvent(VkDevice device, VkEvent event) {
+void CoreChecks::PreCallRecordSetEvent(VkDevice device, VkEvent event) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto event_state = GetEventNode(device_data, event);
if (event_state) {
@@ -11178,7 +11160,8 @@
}
}
-bool PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
+bool CoreChecks::PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
+ VkFence fence) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
auto pFence = GetFenceNode(device_data, fence);
bool skip = ValidateFenceForSubmit(device_data, pFence);
@@ -11306,8 +11289,8 @@
return skip;
}
-void PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence,
- VkResult result) {
+void CoreChecks::PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo,
+ VkFence fence, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
if (result != VK_SUCCESS) return;
uint64_t early_retire_seq = 0;
@@ -11423,8 +11406,8 @@
}
}
-void PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore, VkResult result) {
+void CoreChecks::PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
SEMAPHORE_NODE *sNode = &device_data->semaphoreMap[*pSemaphore];
@@ -11434,7 +11417,7 @@
sNode->scope = kSyncScopeInternal;
}
-static bool ValidateImportSemaphore(layer_data *device_data, VkSemaphore semaphore, const char *caller_name) {
+bool CoreChecks::ValidateImportSemaphore(layer_data *device_data, VkSemaphore semaphore, const char *caller_name) {
bool skip = false;
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(device_data, semaphore);
if (sema_node) {
@@ -11444,8 +11427,8 @@
return skip;
}
-static void RecordImportSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
- VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
+void CoreChecks::RecordImportSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags) {
SEMAPHORE_NODE *sema_node = GetSemaphoreNode(device_data, semaphore);
if (sema_node && sema_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) &&
@@ -11458,15 +11441,14 @@
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateImportSemaphoreWin32HandleKHR(VkDevice device,
- const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
+bool CoreChecks::PreCallValidateImportSemaphoreWin32HandleKHR(
+ VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateImportSemaphore(device_data, pImportSemaphoreWin32HandleInfo->semaphore, "vkImportSemaphoreWin32HandleKHR");
}
-void PostCallRecordImportSemaphoreWin32HandleKHR(VkDevice device,
- const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo,
- VkResult result) {
+void CoreChecks::PostCallRecordImportSemaphoreWin32HandleKHR(
+ VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR *pImportSemaphoreWin32HandleInfo, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordImportSemaphoreState(device_data, pImportSemaphoreWin32HandleInfo->semaphore, pImportSemaphoreWin32HandleInfo->handleType,
@@ -11474,21 +11456,21 @@
}
#endif // VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
+bool CoreChecks::PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateImportSemaphore(device_data, pImportSemaphoreFdInfo->semaphore, "vkImportSemaphoreFdKHR");
}
-void PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo,
- VkResult result) {
+void CoreChecks::PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordImportSemaphoreState(device_data, pImportSemaphoreFdInfo->semaphore, pImportSemaphoreFdInfo->handleType,
pImportSemaphoreFdInfo->flags);
}
-static void RecordGetExternalSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
- VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
+void CoreChecks::RecordGetExternalSemaphoreState(layer_data *device_data, VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type) {
SEMAPHORE_NODE *semaphore_state = GetSemaphoreNode(device_data, semaphore);
if (semaphore_state && handle_type != VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
// Cannot track semaphore state once it is exported, except for Sync FD handle types which have copy transference
@@ -11497,21 +11479,23 @@
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
-void PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
- HANDLE *pHandle, VkResult result) {
+void CoreChecks::PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device,
+ const VkSemaphoreGetWin32HandleInfoKHR *pGetWin32HandleInfo,
+ HANDLE *pHandle, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordGetExternalSemaphoreState(device_data, pGetWin32HandleInfo->semaphore, pGetWin32HandleInfo->handleType);
}
#endif
-void PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) {
+void CoreChecks::PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR *pGetFdInfo, int *pFd,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordGetExternalSemaphoreState(device_data, pGetFdInfo->semaphore, pGetFdInfo->handleType);
}
-static bool ValidateImportFence(layer_data *device_data, VkFence fence, const char *caller_name) {
+bool CoreChecks::ValidateImportFence(layer_data *device_data, VkFence fence, const char *caller_name) {
FENCE_NODE *fence_node = GetFenceNode(device_data, fence);
bool skip = false;
if (fence_node && fence_node->scope == kSyncScopeInternal && fence_node->state == FENCE_INFLIGHT) {
@@ -11522,8 +11506,8 @@
return skip;
}
-static void RecordImportFenceState(layer_data *device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
- VkFenceImportFlagsKHR flags) {
+void CoreChecks::RecordImportFenceState(layer_data *device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
+ VkFenceImportFlagsKHR flags) {
FENCE_NODE *fence_node = GetFenceNode(device_data, fence);
if (fence_node && fence_node->scope != kSyncScopeExternalPermanent) {
if ((handle_type == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR || flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) &&
@@ -11536,12 +11520,14 @@
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
+bool CoreChecks::PreCallValidateImportFenceWin32HandleKHR(VkDevice device,
+ const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateImportFence(device_data, pImportFenceWin32HandleInfo->fence, "vkImportFenceWin32HandleKHR");
}
-void PostCallRecordImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo,
- VkResult result) {
+void CoreChecks::PostCallRecordImportFenceWin32HandleKHR(VkDevice device,
+ const VkImportFenceWin32HandleInfoKHR *pImportFenceWin32HandleInfo,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordImportFenceState(device_data, pImportFenceWin32HandleInfo->fence, pImportFenceWin32HandleInfo->handleType,
@@ -11549,17 +11535,19 @@
}
#endif // VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
+bool CoreChecks::PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateImportFence(device_data, pImportFenceFdInfo->fence, "vkImportFenceFdKHR");
}
-void PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo, VkResult result) {
+void CoreChecks::PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR *pImportFenceFdInfo,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordImportFenceState(device_data, pImportFenceFdInfo->fence, pImportFenceFdInfo->handleType, pImportFenceFdInfo->flags);
}
-static void RecordGetExternalFenceState(layer_data *device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
+void CoreChecks::RecordGetExternalFenceState(layer_data *device_data, VkFence fence,
+ VkExternalFenceHandleTypeFlagBitsKHR handle_type) {
FENCE_NODE *fence_state = GetFenceNode(device_data, fence);
if (fence_state) {
if (handle_type != VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR) {
@@ -11573,22 +11561,22 @@
}
#ifdef VK_USE_PLATFORM_WIN32_KHR
-void PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo, HANDLE *pHandle,
- VkResult result) {
+void CoreChecks::PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR *pGetWin32HandleInfo,
+ HANDLE *pHandle, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordGetExternalFenceState(device_data, pGetWin32HandleInfo->fence, pGetWin32HandleInfo->handleType);
}
#endif
-void PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) {
+void CoreChecks::PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR *pGetFdInfo, int *pFd, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordGetExternalFenceState(device_data, pGetFdInfo->fence, pGetFdInfo->handleType);
}
-void PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
- VkEvent *pEvent, VkResult result) {
+void CoreChecks::PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkEvent *pEvent, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
device_data->eventMap[*pEvent].needsSignaled = false;
@@ -11596,13 +11584,14 @@
device_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
}
-bool ValidateCreateSwapchain(layer_data *device_data, const char *func_name, VkSwapchainCreateInfoKHR const *pCreateInfo,
- SURFACE_STATE *surface_state, SWAPCHAIN_NODE *old_swapchain_state) {
+bool CoreChecks::ValidateCreateSwapchain(layer_data *device_data, const char *func_name,
+ VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
+ SWAPCHAIN_NODE *old_swapchain_state) {
VkDevice device = device_data->device;
// All physical devices and queue families are required to be able to present to any native window on Android; require the
// application to have established support on any other platform.
- if (!device_data->instance_data->extensions.vk_khr_android_surface) {
+ if (!device_data->instance_extensions.vk_khr_android_surface) {
auto support_predicate = [device_data](decltype(surface_state->gpu_queue_support)::value_type qs) -> bool {
// TODO: should restrict search only to queue families of VkDeviceQueueCreateInfos, not whole phys. device
return (qs.first.gpu == device_data->physical_device) && qs.second;
@@ -11644,7 +11633,7 @@
return true;
}
- auto physical_device_state = GetPhysicalDeviceState(device_data->instance_data, device_data->physical_device);
+ auto physical_device_state = GetPhysicalDeviceState();
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
HandleToUint64(device_data->physical_device), kVUID_Core_DrawState_SwapchainCreateBeforeQuery,
@@ -11822,7 +11811,7 @@
// Validate state for shared presentable case
if (VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR == pCreateInfo->presentMode ||
VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR == pCreateInfo->presentMode) {
- if (!device_data->extensions.vk_khr_shared_presentable_image) {
+ if (!device_data->device_extensions.vk_khr_shared_presentable_image) {
if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled,
"%s called with presentMode %s which requires the VK_KHR_shared_presentable_image extension, which has not "
@@ -11840,7 +11829,7 @@
}
if (pCreateInfo->flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) {
- if (!device_data->extensions.vk_khr_swapchain_mutable_format) {
+ if (!device_data->device_extensions.vk_khr_swapchain_mutable_format) {
if (log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
HandleToUint64(device), kVUID_Core_DrawState_ExtensionNotEnabled,
"%s: pCreateInfo->flags contains VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR which requires the "
@@ -11895,10 +11884,10 @@
return false;
}
-bool PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
+bool CoreChecks::PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
- auto surface_state = GetSurfaceState(device_data->instance_data, pCreateInfo->surface);
+ auto surface_state = GetSurfaceState(pCreateInfo->surface);
auto old_swapchain_state = GetSwapchainNode(device_data, pCreateInfo->oldSwapchain);
return ValidateCreateSwapchain(device_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state);
}
@@ -11924,15 +11913,17 @@
return;
}
-void PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain, VkResult result) {
+void CoreChecks::PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
- auto surface_state = GetSurfaceState(device_data->instance_data, pCreateInfo->surface);
+ auto surface_state = GetSurfaceState(pCreateInfo->surface);
auto old_swapchain_state = GetSwapchainNode(device_data, pCreateInfo->oldSwapchain);
RecordCreateSwapchainState(device_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
}
-void PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!swapchain) return;
auto swapchain_data = GetSwapchainNode(device_data, swapchain);
@@ -11955,7 +11946,7 @@
}
}
- auto surface_state = GetSurfaceState(device_data->instance_data, swapchain_data->createInfo.surface);
+ auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
if (surface_state) {
if (surface_state->swapchain == swapchain_data) surface_state->swapchain = nullptr;
}
@@ -11964,8 +11955,8 @@
}
}
-bool PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
- VkImage *pSwapchainImages) {
+bool CoreChecks::PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
+ VkImage *pSwapchainImages) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
auto swapchain_state = GetSwapchainNode(device_data, swapchain);
bool skip = false;
@@ -11988,8 +11979,8 @@
return skip;
}
-void PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
- VkImage *pSwapchainImages, VkResult result) {
+void CoreChecks::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
+ VkImage *pSwapchainImages, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if ((result != VK_SUCCESS) && (result != VK_INCOMPLETE)) return;
@@ -12040,7 +12031,7 @@
}
}
-bool PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
+bool CoreChecks::PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
bool skip = false;
auto queue_state = GetQueueState(device_data, queue);
@@ -12084,7 +12075,7 @@
if (FindLayouts(device_data, image, layouts)) {
for (auto layout : layouts) {
if ((layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) &&
- (!device_data->extensions.vk_khr_shared_presentable_image ||
+ (!device_data->device_extensions.vk_khr_shared_presentable_image ||
(layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR))) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
@@ -12099,8 +12090,8 @@
// All physical devices and queue families are required to be able to present to any native window on Android; require
// the application to have established support on any other platform.
- if (!device_data->instance_data->extensions.vk_khr_android_surface) {
- auto surface_state = GetSurfaceState(device_data->instance_data, swapchain_data->createInfo.surface);
+ if (!device_data->instance_extensions.vk_khr_android_surface) {
+ auto surface_state = GetSurfaceState(swapchain_data->createInfo.surface);
auto support_it =
surface_state->gpu_queue_support.find({device_data->physical_device, queue_state->queueFamilyIndex});
@@ -12175,7 +12166,7 @@
return skip;
}
-void PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) {
+void CoreChecks::PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
// Semaphore waits occur before error generation, if the call reached the ICD. (Confirm?)
for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
@@ -12205,14 +12196,14 @@
// its semaphore waits) /never/ participate in any completion proof.
}
-bool PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
- const VkSwapchainCreateInfoKHR *pCreateInfos, const VkAllocationCallbacks *pAllocator,
- VkSwapchainKHR *pSwapchains) {
+bool CoreChecks::PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
- auto surface_state = GetSurfaceState(device_data->instance_data, pCreateInfos[i].surface);
+ auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
auto old_swapchain_state = GetSwapchainNode(device_data, pCreateInfos[i].oldSwapchain);
std::stringstream func_name;
func_name << "vkCreateSharedSwapchainsKHR[" << swapchainCount << "]()";
@@ -12223,21 +12214,22 @@
return skip;
}
-void PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains,
- VkResult result) {
+void CoreChecks::PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (pCreateInfos) {
for (uint32_t i = 0; i < swapchainCount; i++) {
- auto surface_state = GetSurfaceState(device_data->instance_data, pCreateInfos[i].surface);
+ auto surface_state = GetSurfaceState(pCreateInfos[i].surface);
auto old_swapchain_state = GetSwapchainNode(device_data, pCreateInfos[i].oldSwapchain);
RecordCreateSwapchainState(device_data, result, &pCreateInfos[i], &pSwapchains[i], surface_state, old_swapchain_state);
}
}
}
-bool ValidateAcquireNextImage(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
- VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name) {
+bool CoreChecks::ValidateAcquireNextImage(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, const char *func_name) {
bool skip = false;
if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
@@ -12268,7 +12260,7 @@
func_name);
}
- auto physical_device_state = GetPhysicalDeviceState(device_data->instance_data, device_data->physical_device);
+ auto physical_device_state = GetPhysicalDeviceState();
if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
[=](VkImage image) { return GetImageState(device_data, image)->acquired; });
@@ -12290,21 +12282,22 @@
return skip;
}
-bool PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
- VkFence fence, uint32_t *pImageIndex) {
+bool CoreChecks::PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateAcquireNextImage(device_data, device, swapchain, timeout, semaphore, fence, pImageIndex,
"vkAcquireNextImageKHR");
}
-bool PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex) {
+bool CoreChecks::PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
+ uint32_t *pImageIndex) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateAcquireNextImage(device_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex, "vkAcquireNextImage2KHR");
}
-void RecordAcquireNextImageState(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
- VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
+void CoreChecks::RecordAcquireNextImageState(layer_data *device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
auto pFence = GetFenceNode(device_data, fence);
if (pFence && pFence->scope == kSyncScopeInternal) {
// Treat as inflight since it is valid to wait on this fence, even in cases where it is technically a temporary
@@ -12333,30 +12326,31 @@
}
}
-void PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
- VkFence fence, uint32_t *pImageIndex, VkResult result) {
+void CoreChecks::PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
RecordAcquireNextImageState(device_data, device, swapchain, timeout, semaphore, fence, pImageIndex);
}
-void PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo, uint32_t *pImageIndex,
- VkResult result) {
+void CoreChecks::PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR *pAcquireInfo,
+ uint32_t *pImageIndex, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if ((VK_SUCCESS != result) && (VK_SUBOPTIMAL_KHR != result)) return;
RecordAcquireNextImageState(device_data, device, pAcquireInfo->swapchain, pAcquireInfo->timeout, pAcquireInfo->semaphore,
pAcquireInfo->fence, pImageIndex);
}
-void PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount, VkPhysicalDevice *pPhysicalDevices,
- VkResult result) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
+void CoreChecks::PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
+ VkPhysicalDevice *pPhysicalDevices, VkResult result) {
+ instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
if ((NULL != pPhysicalDevices) && ((result == VK_SUCCESS || result == VK_INCOMPLETE))) {
for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
phys_device_state.phys_device = pPhysicalDevices[i];
// Init actual features for each physical device
- instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features2.features);
+ instance_data->instance_dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i],
+ &phys_device_state.features2.features);
}
}
}
@@ -12393,30 +12387,33 @@
return skip;
}
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties *pQueueFamilyProperties) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties()");
}
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
"vkGetPhysicalDeviceQueueFamilyProperties2()");
}
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
return ValidateCommonGetPhysicalDeviceQueueFamilyProperties(instance_data, physical_device_state, *pQueueFamilyPropertyCount,
(nullptr == pQueueFamilyProperties),
@@ -12441,10 +12438,10 @@
}
}
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties *pQueueFamilyProperties) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties *pQueueFamilyProperties) {
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
VkQueueFamilyProperties2KHR *pqfp = nullptr;
std::vector<VkQueueFamilyProperties2KHR> qfp;
@@ -12460,27 +12457,28 @@
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount, pqfp);
}
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+void CoreChecks::PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
assert(physical_device_state);
StateUpdateCommonGetPhysicalDeviceQueueFamilyProperties(physical_device_state, *pQueueFamilyPropertyCount,
pQueueFamilyProperties);
}
-bool PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
+bool CoreChecks::PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
+ const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
- auto surface_state = GetSurfaceState(instance_data, surface);
+ auto surface_state = GetSurfaceState(surface);
bool skip = false;
if ((surface_state) && (surface_state->swapchain)) {
skip |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT,
@@ -12490,7 +12488,8 @@
return skip;
}
-void PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface,
+ const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
instance_data->surface_map.erase(surface);
}
@@ -12499,16 +12498,18 @@
instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
}
-void PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
-void PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
@@ -12516,8 +12517,9 @@
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
-void PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
@@ -12525,8 +12527,9 @@
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
-void PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
@@ -12534,17 +12537,19 @@
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
-void PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
}
-bool PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- struct wl_display *display) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ struct wl_display *display) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWaylandPresentationSupportKHR-queueFamilyIndex-01306",
"vkGetPhysicalDeviceWaylandPresentationSupportKHR", "queueFamilyIndex");
@@ -12552,16 +12557,18 @@
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
-void PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
}
-bool PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceWin32PresentationSupportKHR-queueFamilyIndex-01309",
"vkGetPhysicalDeviceWin32PresentationSupportKHR", "queueFamilyIndex");
@@ -12569,17 +12576,19 @@
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
-void PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
}
-bool PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- xcb_connection_t *connection, xcb_visualid_t visual_id) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex, xcb_connection_t *connection,
+ xcb_visualid_t visual_id) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXcbPresentationSupportKHR-queueFamilyIndex-01312",
"vkGetPhysicalDeviceXcbPresentationSupportKHR", "queueFamilyIndex");
@@ -12587,46 +12596,48 @@
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
-void PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface, VkResult result) {
+void CoreChecks::PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
RecordVulkanSurface(instance_data, pSurface);
}
-bool PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- Display *dpy, VisualID visualID) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex, Display *dpy,
+ VisualID visualID) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- const auto pd_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ const auto pd_state = GetPhysicalDeviceState(physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, pd_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceXlibPresentationSupportKHR-queueFamilyIndex-01315",
"vkGetPhysicalDeviceXlibPresentationSupportKHR", "queueFamilyIndex");
}
#endif // VK_USE_PLATFORM_XLIB_KHR
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilitiesKHR *pSurfaceCapabilities, VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR *pSurfaceCapabilities,
+ VkResult result) {
if (VK_SUCCESS != result) return;
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
}
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- VkSurfaceCapabilities2KHR *pSurfaceCapabilities, VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
+ VkSurfaceCapabilities2KHR *pSurfaceCapabilities,
+ VkResult result) {
if (VK_SUCCESS != result) return;
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities = pSurfaceCapabilities->surfaceCapabilities;
}
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilities2EXT *pSurfaceCapabilities, VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ VkSurfaceCapabilities2EXT *pSurfaceCapabilities,
+ VkResult result) {
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
physical_device_state->surfaceCapabilities.minImageCount = pSurfaceCapabilities->minImageCount;
physical_device_state->surfaceCapabilities.maxImageCount = pSurfaceCapabilities->maxImageCount;
@@ -12640,31 +12651,29 @@
physical_device_state->surfaceCapabilities.supportedUsageFlags = pSurfaceCapabilities->supportedUsageFlags;
}
-bool PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- VkSurfaceKHR surface, VkBool32 *pSupported) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface, VkBool32 *pSupported) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- const auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ const auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
return ValidatePhysicalDeviceQueueFamily(instance_data, physical_device_state, queueFamilyIndex,
"VUID-vkGetPhysicalDeviceSurfaceSupportKHR-queueFamilyIndex-01269",
"vkGetPhysicalDeviceSurfaceSupportKHR", "queueFamilyIndex");
}
-void PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- VkSurfaceKHR surface, VkBool32 *pSupported, VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface, VkBool32 *pSupported, VkResult result) {
if (VK_SUCCESS != result) return;
- auto surface_state = GetSurfaceState(instance_data, surface);
+ auto surface_state = GetSurfaceState(surface);
surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported == VK_TRUE);
}
-void PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes,
- VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t *pPresentModeCount, VkPresentModeKHR *pPresentModes,
+ VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
// TODO: This isn't quite right -- available modes may differ by surface AND physical device.
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;
if (*pPresentModeCount) {
@@ -12680,11 +12689,12 @@
}
}
-bool PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats) {
+bool CoreChecks::PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormatKHR *pSurfaceFormats) {
auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
if (!pSurfaceFormats) return false;
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
bool skip = false;
switch (call_state) {
@@ -12713,13 +12723,12 @@
return skip;
}
-void PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t *pSurfaceFormatCount, VkSurfaceFormatKHR *pSurfaceFormats,
- VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormatKHR *pSurfaceFormats, VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;
if (*pSurfaceFormatCount) {
@@ -12735,14 +12744,13 @@
}
}
-void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
- uint32_t *pSurfaceFormatCount, VkSurfaceFormat2KHR *pSurfaceFormats,
- VkResult result) {
- auto instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
+void CoreChecks::PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR *pSurfaceInfo,
+ uint32_t *pSurfaceFormatCount,
+ VkSurfaceFormat2KHR *pSurfaceFormats, VkResult result) {
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
- auto physicalDeviceState = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physicalDeviceState = GetPhysicalDeviceState(physicalDevice);
if (*pSurfaceFormatCount) {
if (physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState < QUERY_COUNT) {
physicalDeviceState->vkGetPhysicalDeviceSurfaceFormatsKHRState = QUERY_COUNT;
@@ -12760,61 +12768,54 @@
}
}
-void PreCallRecordQueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
+void CoreChecks::PreCallRecordQueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
BeginQueueDebugUtilsLabel(device_data->report_data, queue, pLabelInfo);
}
-void PostCallRecordQueueEndDebugUtilsLabelEXT(VkQueue queue) {
+void CoreChecks::PostCallRecordQueueEndDebugUtilsLabelEXT(VkQueue queue) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
EndQueueDebugUtilsLabel(device_data->report_data, queue);
}
-void PreCallRecordQueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
+void CoreChecks::PreCallRecordQueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
InsertQueueDebugUtilsLabel(device_data->report_data, queue, pLabelInfo);
}
-void PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
+void CoreChecks::PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
BeginCmdDebugUtilsLabel(device_data->report_data, commandBuffer, pLabelInfo);
}
-void PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
+void CoreChecks::PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
EndCmdDebugUtilsLabel(device_data->report_data, commandBuffer);
}
-void PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
+void CoreChecks::PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
InsertCmdDebugUtilsLabel(device_data->report_data, commandBuffer, pLabelInfo);
}
-void PostCallRecordCreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDebugUtilsMessengerEXT *pMessenger,
- VkResult result) {
+void CoreChecks::PostCallRecordCreateDebugUtilsMessengerEXT(VkInstance instance,
+ const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDebugUtilsMessengerEXT *pMessenger, VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if (VK_SUCCESS != result) return;
layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
}
-void PostCallRecordDestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PostCallRecordDestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
+ const VkAllocationCallbacks *pAllocator) {
if (!messenger) return;
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
}
-void PostCallRecordCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback,
- VkResult result) {
- instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
- if (VK_SUCCESS != result) return;
- layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
-}
-
-void PostCallDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PostCallRecordDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks *pAllocator) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
}
@@ -12828,30 +12829,31 @@
auto &phys_device_state = instance_data->physical_device_map[cur_phys_dev];
phys_device_state.phys_device = cur_phys_dev;
// Init actual features for each physical device
- instance_data->dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev, &phys_device_state.features2.features);
+ instance_data->instance_dispatch_table.GetPhysicalDeviceFeatures(cur_phys_dev,
+ &phys_device_state.features2.features);
}
}
}
}
-void PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
- VkResult result) {
+void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
PostRecordEnumeratePhysicalDeviceGroupsState(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
-void PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
- VkResult result) {
+void CoreChecks::PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t *pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupPropertiesKHR *pPhysicalDeviceGroupProperties,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
PostRecordEnumeratePhysicalDeviceGroupsState(instance_data, pPhysicalDeviceGroupCount, pPhysicalDeviceGroupProperties);
}
-bool ValidateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
- const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) {
+bool CoreChecks::ValidateDescriptorUpdateTemplate(const char *func_name, layer_data *device_data,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo) {
bool skip = false;
const auto layout = GetDescriptorSetLayout(device_data, pCreateInfo->descriptorSetLayout);
if (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET == pCreateInfo->templateType && !layout) {
@@ -12893,33 +12895,37 @@
return skip;
}
-bool PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
+bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplate()", device_data, pCreateInfo);
return skip;
}
-bool PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
+bool CoreChecks::PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = ValidateDescriptorUpdateTemplate("vkCreateDescriptorUpdateTemplateKHR()", device_data, pCreateInfo);
return skip;
}
-void PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!descriptorUpdateTemplate) return;
device_data->desc_template_map.erase(descriptorUpdateTemplate);
}
-void PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!descriptorUpdateTemplate) return;
device_data->desc_template_map.erase(descriptorUpdateTemplate);
@@ -12932,24 +12938,29 @@
device_data->desc_template_map[*pDescriptorUpdateTemplate] = std::move(template_state);
}
-void PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) {
+void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordCreateDescriptorUpdateTemplateState(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}
-void PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate, VkResult result) {
+void CoreChecks::PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate,
+ VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordCreateDescriptorUpdateTemplateState(device_data, pCreateInfo, pDescriptorUpdateTemplate);
}
-bool ValidateUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
+bool CoreChecks::ValidateUpdateDescriptorSetWithTemplate(layer_data *device_data, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData) {
bool skip = false;
auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == device_data->desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
@@ -12960,26 +12971,29 @@
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Validate template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
- skip = cvdescriptorset::ValidateUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
+ skip = ValidateUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
}
}
return skip;
}
-bool PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) {
+bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
-bool PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
+bool CoreChecks::PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateUpdateDescriptorSetWithTemplate(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
-void RecordUpdateDescriptorSetWithTemplateState(layer_data *device_data, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
+void CoreChecks::RecordUpdateDescriptorSetWithTemplateState(layer_data *device_data, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData) {
auto const template_map_entry = device_data->desc_template_map.find(descriptorUpdateTemplate);
if ((template_map_entry == device_data->desc_template_map.end()) || (template_map_entry->second.get() == nullptr)) {
assert(0);
@@ -12987,19 +13001,21 @@
const TEMPLATE_STATE *template_state = template_map_entry->second.get();
// TODO: Record template push descriptor updates
if (template_state->create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET) {
- cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
+ PerformUpdateDescriptorSetsWithTemplateKHR(device_data, descriptorSet, template_state, pData);
}
}
}
-void PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void *pData) {
+void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordUpdateDescriptorSetWithTemplateState(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
-void PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void *pData) {
+void CoreChecks::PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
RecordUpdateDescriptorSetWithTemplateState(device_data, descriptorSet, descriptorUpdateTemplate, pData);
}
@@ -13013,9 +13029,9 @@
return dsl;
}
-bool PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- VkPipelineLayout layout, uint32_t set, const void *pData) {
+bool CoreChecks::PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkPipelineLayout layout, uint32_t set, const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
@@ -13093,9 +13109,9 @@
return skip;
}
-void PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- VkPipelineLayout layout, uint32_t set, const void *pData) {
+void CoreChecks::PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkPipelineLayout layout, uint32_t set, const void *pData) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
@@ -13115,9 +13131,10 @@
}
}
-static void RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_layer_data *instance_data, VkPhysicalDevice physicalDevice,
- uint32_t *pPropertyCount, void *pProperties) {
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+void CoreChecks::RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_layer_data *instance_data,
+ VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
+ void *pProperties) {
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (*pPropertyCount) {
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState < QUERY_COUNT) {
physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState = QUERY_COUNT;
@@ -13131,25 +13148,28 @@
}
}
-void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
- VkDisplayPlanePropertiesKHR *pProperties, VkResult result) {
+void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
+ VkDisplayPlanePropertiesKHR *pProperties,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_data, physicalDevice, pPropertyCount, pProperties);
}
-void PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
- VkDisplayPlaneProperties2KHR *pProperties, VkResult result) {
+void CoreChecks::PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t *pPropertyCount,
+ VkDisplayPlaneProperties2KHR *pProperties,
+ VkResult result) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
if ((VK_SUCCESS != result) && (VK_INCOMPLETE != result)) return;
RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_data, physicalDevice, pPropertyCount, pProperties);
}
-static bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
- VkPhysicalDevice physicalDevice, uint32_t planeIndex,
- const char *api_name) {
+bool CoreChecks::ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data *instance_data,
+ VkPhysicalDevice physicalDevice, uint32_t planeIndex,
+ const char *api_name) {
bool skip = false;
- auto physical_device_state = GetPhysicalDeviceState(instance_data, physicalDevice);
+ auto physical_device_state = GetPhysicalDeviceState(physicalDevice);
if (physical_device_state->vkGetPhysicalDeviceDisplayPlanePropertiesKHRState == UNCALLED) {
skip |=
log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
@@ -13170,8 +13190,8 @@
return skip;
}
-bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
- uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
+bool CoreChecks::PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
+ uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
@@ -13179,8 +13199,8 @@
return skip;
}
-bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex,
- VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
+bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
+ uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, planeIndex,
@@ -13188,9 +13208,9 @@
return skip;
}
-bool PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
- VkDisplayPlaneCapabilities2KHR *pCapabilities) {
+bool CoreChecks::PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
+ const VkDisplayPlaneInfo2KHR *pDisplayPlaneInfo,
+ VkDisplayPlaneCapabilities2KHR *pCapabilities) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
bool skip = false;
skip |= ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_data, physicalDevice, pDisplayPlaneInfo->planeIndex,
@@ -13198,38 +13218,40 @@
return skip;
}
-bool PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
+bool CoreChecks::PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer,
+ const VkDebugMarkerMarkerInfoEXT *pMarkerInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
return ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERBEGINEXT, "vkCmdDebugMarkerBeginEXT()");
}
-bool PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
+bool CoreChecks::PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
assert(cb_state);
return ValidateCmd(device_data, cb_state, CMD_DEBUGMARKERENDEXT, "vkCmdDebugMarkerEndEXT()");
}
-bool PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
- uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
+bool CoreChecks::PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount, const VkRect2D *pDiscardRectangles) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(device_data, cb_state, CMD_SETDISCARDRECTANGLEEXT, "vkCmdSetDiscardRectangleEXT()");
}
-bool PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
+bool CoreChecks::PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT *pSampleLocationsInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
// Minimal validation for command buffer state
return ValidateCmd(device_data, cb_state, CMD_SETSAMPLELOCATIONSEXT, "vkCmdSetSampleLocationsEXT()");
}
-bool PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
if (offset & 3) {
@@ -13269,8 +13291,9 @@
return skip;
}
-void PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer,
- VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
@@ -13280,9 +13303,9 @@
AddCommandBufferBindingBuffer(device_data, cb_state, count_buffer_state);
}
-bool PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = false;
if (offset & 3) {
@@ -13323,9 +13346,9 @@
return skip;
}
-void PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
@@ -13335,7 +13358,7 @@
AddCommandBufferBindingBuffer(device_data, cb_state, count_buffer_state);
}
-bool PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
+bool CoreChecks::PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSNV,
"vkCmdDrawMeshTasksNV()", VK_QUEUE_GRAPHICS_BIT,
@@ -13344,14 +13367,14 @@
return skip;
}
-void PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
+void CoreChecks::PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
}
-bool PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t drawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSINDIRECTNV,
"vkCmdDrawMeshTasksIndirectNV()", VK_QUEUE_GRAPHICS_BIT,
@@ -13365,8 +13388,8 @@
return skip;
}
-void PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t drawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
UpdateStateCmdDrawType(device_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
@@ -13376,9 +13399,9 @@
}
}
-bool PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride) {
+bool CoreChecks::PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
bool skip = ValidateCmdDrawType(
device_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, CMD_DRAWMESHTASKSINDIRECTCOUNTNV,
@@ -13395,9 +13418,9 @@
return skip;
}
-void PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride) {
+void CoreChecks::PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset,
+ uint32_t maxDrawCount, uint32_t stride) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
GLOBAL_CB_NODE *cb_state = GetCBNode(device_data, commandBuffer);
BUFFER_STATE *buffer_state = GetBufferState(device_data, buffer);
@@ -13411,8 +13434,8 @@
}
}
-static bool ValidateCreateSamplerYcbcrConversion(const layer_data *device_data, const char *func_name,
- const VkSamplerYcbcrConversionCreateInfo *create_info) {
+bool CoreChecks::ValidateCreateSamplerYcbcrConversion(const layer_data *device_data, const char *func_name,
+ const VkSamplerYcbcrConversionCreateInfo *create_info) {
bool skip = false;
if (GetDeviceExtensions(device_data)->vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateSamplerYcbcrConversionANDROID(device_data, create_info);
@@ -13427,45 +13450,48 @@
return skip;
}
-bool PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSamplerYcbcrConversion *pYcbcrConversion) {
+bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateCreateSamplerYcbcrConversion(device_data, "vkCreateSamplerYcbcrConversion()", pCreateInfo);
}
-bool PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSamplerYcbcrConversion *pYcbcrConversion) {
+bool CoreChecks::PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device,
+ const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
return ValidateCreateSamplerYcbcrConversion(device_data, "vkCreateSamplerYcbcrConversionKHR()", pCreateInfo);
}
-static void RecordCreateSamplerYcbcrConversionState(layer_data *device_data, const VkSamplerYcbcrConversionCreateInfo *create_info,
- VkSamplerYcbcrConversion ycbcr_conversion) {
+void CoreChecks::RecordCreateSamplerYcbcrConversionState(layer_data *device_data,
+ const VkSamplerYcbcrConversionCreateInfo *create_info,
+ VkSamplerYcbcrConversion ycbcr_conversion) {
if (GetDeviceExtensions(device_data)->vk_android_external_memory_android_hardware_buffer) {
RecordCreateSamplerYcbcrConversionANDROID(device_data, create_info, ycbcr_conversion);
}
}
-void PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkSamplerYcbcrConversion *pYcbcrConversion,
- VkResult result) {
+void CoreChecks::PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordCreateSamplerYcbcrConversionState(device_data, pCreateInfo, *pYcbcrConversion);
}
-void PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator,
- VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) {
+void CoreChecks::PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device,
+ const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkSamplerYcbcrConversion *pYcbcrConversion, VkResult result) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (VK_SUCCESS != result) return;
RecordCreateSamplerYcbcrConversionState(device_data, pCreateInfo, *pYcbcrConversion);
}
-void PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ycbcrConversion) return;
if (GetDeviceExtensions(device_data)->vk_android_external_memory_android_hardware_buffer) {
@@ -13473,8 +13499,8 @@
}
}
-void PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks *pAllocator) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
if (!ycbcrConversion) return;
if (GetDeviceExtensions(device_data)->vk_android_external_memory_android_hardware_buffer) {
@@ -13482,7 +13508,7 @@
}
}
-bool PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) {
+bool CoreChecks::PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT *pInfo) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -13514,10 +13540,10 @@
return skip;
}
-void PostCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties *pPhysicalDeviceProperties) {
+void CoreChecks::PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties *pPhysicalDeviceProperties) {
instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
- if (instance_data->enabled.gpu_validation && instance_data->enabled.gpu_validation_reserve_binding_slot) {
+ if (GetEnables(instance_data)->gpu_validation && GetEnables(instance_data)->gpu_validation_reserve_binding_slot) {
if (pPhysicalDeviceProperties->limits.maxBoundDescriptorSets > 1) {
pPhysicalDeviceProperties->limits.maxBoundDescriptorSets -= 1;
} else {
@@ -13528,25 +13554,27 @@
}
}
-VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkValidationCacheEXT *pValidationCache) {
+VkResult CoreChecks::CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator,
+ VkValidationCacheEXT *pValidationCache) {
*pValidationCache = ValidationCache::Create(pCreateInfo);
return *pValidationCache ? VK_SUCCESS : VK_ERROR_INITIALIZATION_FAILED;
}
-void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
- const VkAllocationCallbacks *pAllocator) {
+void CoreChecks::CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
+ const VkAllocationCallbacks *pAllocator) {
delete (ValidationCache *)validationCache;
}
-VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize, void *pData) {
+VkResult CoreChecks::CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t *pDataSize,
+ void *pData) {
size_t inSize = *pDataSize;
((ValidationCache *)validationCache)->Write(pDataSize, pData);
return (pData && *pDataSize != inSize) ? VK_INCOMPLETE : VK_SUCCESS;
}
-VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
- const VkValidationCacheEXT *pSrcCaches) {
+VkResult CoreChecks::CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
+ const VkValidationCacheEXT *pSrcCaches) {
layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
auto dst = (ValidationCache *)dstCache;
@@ -13567,5 +13595,3 @@
return result;
}
-
-} // namespace core_validation
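
The core_validation.cpp changes above all follow one shape: a free function that located its state through GetLayerDataPtr/get_dispatch_key and passed instance_data or device_data into every getter becomes a CoreChecks member that calls a state getter directly. The stand-alone C++ sketch below shows that shape in miniature; the names (CoreChecksSketch, PostCallRecordQuery, int handles) are illustrative stand-ins, not the layer's real types.

    #include <cassert>
    #include <unordered_map>

    struct PhysicalDeviceState { int query_count = 0; };

    // Before: the caller supplies the map and handle, so every intercept repeats lookup boilerplate.
    PhysicalDeviceState* GetStateFree(std::unordered_map<int, PhysicalDeviceState>& map, int handle) {
        auto it = map.find(handle);
        return (it == map.end()) ? nullptr : &it->second;
    }

    // After: the validation object owns the map, so intercepts call a member helper directly.
    class CoreChecksSketch {
       public:
        std::unordered_map<int, PhysicalDeviceState> physical_device_map;

        PhysicalDeviceState* GetPhysicalDeviceState(int handle) {
            auto it = physical_device_map.find(handle);
            return (it == physical_device_map.end()) ? nullptr : &it->second;
        }

        void PostCallRecordQuery(int handle) {
            auto* state = GetPhysicalDeviceState(handle);  // no instance_data parameter needed
            assert(state);
            state->query_count++;
        }
    };

    int main() {
        CoreChecksSketch checks;
        checks.physical_device_map[1] = {};
        checks.PostCallRecordQuery(1);

        std::unordered_map<int, PhysicalDeviceState> standalone_map;
        standalone_map[2] = {};
        return (GetStateFree(standalone_map, 2) != nullptr) ? 0 : 1;
    }

This is why the instance_data argument disappears from GetPhysicalDeviceState and GetSurfaceState throughout the hunks above: once the maps live on the object performing the validation, the lookups need only the handle.
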
diff --git a/layers/core_validation.h b/layers/core_validation.h
index 898bdb7..7d7953b 100644
--- a/layers/core_validation.h
+++ b/layers/core_validation.h
@@ -26,10 +26,12 @@
#include "core_validation_error_enums.h"
#include "core_validation_types.h"
#include "descriptor_sets.h"
+#include "shader_validation.h"
+#include "gpu_validation.h"
#include "vk_layer_logging.h"
#include "vulkan/vk_layer.h"
#include "vk_typemap_helper.h"
-#include "gpu_validation.h"
+#include "vk_layer_data.h"
#include <atomic>
#include <functional>
#include <memory>
@@ -39,37 +41,6 @@
#include <list>
#include <deque>
-/*
- * MTMTODO : Update this comment
- * Data Structure overview
- * There are 4 global STL(' maps
- * cbMap -- map of command Buffer (CB) objects to MT_CB_INFO structures
- * Each MT_CB_INFO struct has an stl list container with
- * memory objects that are referenced by this CB
- * memObjMap -- map of Memory Objects to MT_MEM_OBJ_INFO structures
- * Each MT_MEM_OBJ_INFO has two stl list containers with:
- * -- all CBs referencing this mem obj
- * -- all VK Objects that are bound to this memory
- * objectMap -- map of objects to MT_OBJ_INFO structures
- *
- * Algorithm overview
- * These are the primary events that should happen related to different objects
- * 1. Command buffers
- * CREATION - Add object,structure to map
- * CMD BIND - If mem associated, add mem reference to list container
- * DESTROY - Remove from map, decrement (and report) mem references
- * 2. Mem Objects
- * CREATION - Add object,structure to map
- * OBJ BIND - Add obj structure to list container for that mem node
- * CMB BIND - If mem-related add CB structure to list container for that mem node
- * DESTROY - Flag as errors any remaining refs and remove from map
- * 3. Generic Objects
- * MEM BIND - DESTROY any previous binding, Add obj node w/ ref to map, add obj ref to list container for that mem node
- * DESTROY - If mem bound, remove reference list container for that memInfo, remove object ref from map
- */
-// TODO : Is there a way to track when Cmd Buffer finishes & remove mem references at that point?
-// TODO : Could potentially store a list of freed mem allocs to flag when they're incorrectly used
-
enum SyncScope {
kSyncScopeInternal,
kSyncScopeExternalTemporary,
@@ -166,6 +137,10 @@
uint32_t queue_family_index;
};
+struct SubresourceRangeErrorCodes {
+ const char *base_mip_err, *mip_count_err, *base_layer_err, *layer_count_err;
+};
+
inline bool operator==(GpuQueue const& lhs, GpuQueue const& rhs) {
return (lhs.gpu == rhs.gpu && lhs.queue_family_index == rhs.queue_family_index);
}
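
The SubresourceRangeErrorCodes struct added just above bundles the four VUID strings a subresource-range check can report, so a single shared checker can serve different callers (image views, image barriers, and so on) without hard-coding messages. A minimal hypothetical sketch of that usage follows; the VUID strings are placeholders and the checker is not the layer's actual helper.

    #include <cstdio>

    // Same shape as the struct added in the hunk above.
    struct SubresourceRangeErrorCodes {
        const char *base_mip_err, *mip_count_err, *base_layer_err, *layer_count_err;
    };

    // Hypothetical shared checker: the caller supplies its own VUIDs, the range logic stays generic.
    bool CheckBaseMip(unsigned base_mip, unsigned image_mip_count, const SubresourceRangeErrorCodes& errs) {
        if (base_mip >= image_mip_count) {
            std::printf("validation error: %s\n", errs.base_mip_err);
            return true;  // error found
        }
        return false;
    }

    int main() {
        SubresourceRangeErrorCodes view_errs = {"<baseMipLevel VUID placeholder>", "<levelCount VUID placeholder>",
                                                "<baseArrayLayer VUID placeholder>", "<layerCount VUID placeholder>"};
        return CheckBaseMip(4, 4, view_errs) ? 0 : 1;
    }
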
@@ -190,32 +165,9 @@
using std::unordered_map;
-namespace core_validation {
-
-struct instance_layer_data {
- VkInstance instance = VK_NULL_HANDLE;
- debug_report_data* report_data = nullptr;
- std::vector<VkDebugReportCallbackEXT> logging_callback;
- std::vector<VkDebugUtilsMessengerEXT> logging_messenger;
- VkLayerInstanceDispatchTable dispatch_table;
-
- CHECK_DISABLED disabled = {};
- CHECK_ENABLED enabled = {};
-
- unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
- unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
-
- InstanceExtensions extensions;
- uint32_t api_version;
-};
-
-struct layer_data {
- debug_report_data* report_data = nullptr;
- VkLayerDispatchTable dispatch_table;
-
- DeviceExtensions extensions = {};
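+// Core validation state tracking and checks, now implemented as a single object
+// derived from the layer chassis ValidationObject base class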
+class CoreChecks : public ValidationObject {
+ public:
std::unordered_set<VkQueue> queues; // All queues under given device
- // Layer specific data
unordered_map<VkSampler, std::unique_ptr<SAMPLER_STATE>> samplerMap;
unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> imageMap;
@@ -251,14 +203,26 @@
// Used for instance versions of this object
unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
+ // Link to the device's physical-device data
+ PHYSICAL_DEVICE_STATE* physical_device_state;
unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;
- CHECK_DISABLED disabled = {};
- CHECK_ENABLED enabled = {};
- VkDevice device = VK_NULL_HANDLE;
- VkPhysicalDevice physical_device = VK_NULL_HANDLE;
+ // Link for derived device objects back to their parent instance object
+ CoreChecks* instance_state;
- instance_layer_data* instance_data = nullptr; // from device to enclosing instance
+ // Temporary object pointers
+ layer_data* device_data = this;
+ layer_data* instance_data = this;
+ layer_data* dev_data = this;
+ std::unordered_map<void*, layer_data*> layer_data_map;
+ std::unordered_map<void*, layer_data*> instance_layer_data_map;
+
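+ // These stubs keep legacy lookups of the form
+ // GetLayerDataPtr(get_dispatch_key(handle), layer_data_map) compiling during the
+ // chassis transition: the dispatch key is ignored and the lookup resolves to this object.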
+ dispatch_key get_dispatch_key(const void* object) { return nullptr; }
+
+ template <typename DATA_T>
+ DATA_T* GetLayerDataPtr(void* data_key, std::unordered_map<void*, DATA_T*>& layer_data_map) {
+ return this;
+ }
DeviceFeatures enabled_features = {};
// Device specific data
@@ -279,1742 +243,1376 @@
uint32_t api_version = 0;
GpuValidationState gpu_validation_state = {};
uint32_t physical_device_count;
-};
-VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkInstance* pInstance);
+ // Member declarations for state-tracking and validation helper functions
+ cvdescriptorset::DescriptorSet* GetSetNode(const layer_data*, VkDescriptorSet);
+ DESCRIPTOR_POOL_STATE* GetDescriptorPoolState(const layer_data*, const VkDescriptorPool);
+ BUFFER_STATE* GetBufferState(const layer_data*, VkBuffer);
+ IMAGE_STATE* GetImageState(const layer_data*, VkImage);
+ DEVICE_MEM_INFO* GetMemObjInfo(const layer_data*, VkDeviceMemory);
+ BUFFER_VIEW_STATE* GetBufferViewState(const layer_data*, VkBufferView);
+ SAMPLER_STATE* GetSamplerState(const layer_data*, VkSampler);
+ IMAGE_VIEW_STATE* GetAttachmentImageViewState(layer_data* dev_data, FRAMEBUFFER_STATE* framebuffer, uint32_t index);
+ IMAGE_VIEW_STATE* GetImageViewState(layer_data*, VkImageView);
+ SWAPCHAIN_NODE* GetSwapchainNode(const layer_data*, VkSwapchainKHR);
+ GLOBAL_CB_NODE* GetCBNode(layer_data const* my_data, const VkCommandBuffer cb);
+ PIPELINE_STATE* GetPipelineState(layer_data const* dev_data, VkPipeline pipeline);
+ RENDER_PASS_STATE* GetRenderPassState(layer_data const* dev_data, VkRenderPass renderpass);
+ std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const* dev_data, VkRenderPass renderpass);
+ FRAMEBUFFER_STATE* GetFramebufferState(const layer_data* my_data, VkFramebuffer framebuffer);
+ COMMAND_POOL_NODE* GetCommandPoolNode(layer_data* dev_data, VkCommandPool pool);
+ shader_module const* GetShaderModuleState(layer_data const* dev_data, VkShaderModule module);
+ const DeviceFeatures* GetEnabledFeatures(const layer_data* device_data);
+ FENCE_NODE* GetFenceNode(layer_data* dev_data, VkFence fence);
+ EVENT_STATE* GetEventNode(layer_data* dev_data, VkEvent event);
+ QUERY_POOL_NODE* GetQueryPoolNode(layer_data* dev_data, VkQueryPool query_pool);
+ QUEUE_STATE* GetQueueState(layer_data* dev_data, VkQueue queue);
+ SEMAPHORE_NODE* GetSemaphoreNode(layer_data* dev_data, VkSemaphore semaphore);
+ PHYSICAL_DEVICE_STATE* GetPhysicalDeviceState(VkPhysicalDevice phys);
+ PHYSICAL_DEVICE_STATE* GetPhysicalDeviceState();
+ SURFACE_STATE* GetSurfaceState(VkSurfaceKHR surface);
+ BINDABLE* GetObjectMemBinding(layer_data* dev_data, uint64_t handle, VulkanObjectType type);
+ bool VerifyQueueStateToSeq(layer_data* dev_data, QUEUE_STATE* initial_queue, uint64_t initial_seq);
+ void ClearCmdBufAndMemReferences(layer_data* dev_data, GLOBAL_CB_NODE* cb_node);
+ void ClearMemoryObjectBinding(layer_data* dev_data, uint64_t handle, VulkanObjectType type, VkDeviceMemory mem);
+ void ResetCommandBufferState(layer_data* dev_data, const VkCommandBuffer cb);
+ void SetMemBinding(layer_data* dev_data, VkDeviceMemory mem, BINDABLE* mem_binding, VkDeviceSize memory_offset, uint64_t handle,
+ VulkanObjectType type);
+ bool ValidateSetMemBinding(layer_data* dev_data, VkDeviceMemory mem, uint64_t handle, VulkanObjectType type,
+ const char* apiName);
+ bool SetSparseMemBinding(layer_data* dev_data, MEM_BINDING binding, uint64_t handle, VulkanObjectType type);
+ bool ValidateDeviceQueueFamily(layer_data* device_data, uint32_t queue_family, const char* cmd_name, const char* parameter_name,
+ const char* error_code, bool optional);
+ BASE_NODE* GetStateStructPtrFromObject(layer_data* dev_data, VK_OBJECT object_struct);
+ void RemoveCommandBufferBinding(layer_data* dev_data, VK_OBJECT const* object, GLOBAL_CB_NODE* cb_node);
+ bool ValidateBindBufferMemory(layer_data* device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ const char* api_name);
+ void RecordGetBufferMemoryRequirementsState(layer_data* device_data, VkBuffer buffer,
+ VkMemoryRequirements* pMemoryRequirements);
+ void UpdateBindBufferMemoryState(layer_data* device_data, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset);
+ PIPELINE_LAYOUT_NODE const* GetPipelineLayout(layer_data const* dev_data, VkPipelineLayout pipeLayout);
+ const TEMPLATE_STATE* GetDescriptorTemplateState(const layer_data* dev_data,
+ VkDescriptorUpdateTemplateKHR descriptor_update_template);
+ bool ValidateGetImageMemoryRequirements2(layer_data* dev_data, const VkImageMemoryRequirementsInfo2* pInfo);
+ void RecordGetImageMemoryRequiementsState(layer_data* device_data, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+ void FreeCommandBufferStates(layer_data* dev_data, COMMAND_POOL_NODE* pool_state, const uint32_t command_buffer_count,
+ const VkCommandBuffer* command_buffers);
+ bool CheckCommandBuffersInFlight(layer_data* dev_data, COMMAND_POOL_NODE* pPool, const char* action, const char* error_code);
+ bool CheckCommandBufferInFlight(layer_data* dev_data, const GLOBAL_CB_NODE* cb_node, const char* action,
+ const char* error_code);
+ bool VerifyQueueStateToFence(layer_data* dev_data, VkFence fence);
+ void DecrementBoundResources(layer_data* dev_data, GLOBAL_CB_NODE const* cb_node);
+ bool VerifyWaitFenceState(layer_data* dev_data, VkFence fence, const char* apiCall);
+ void RetireFence(layer_data* dev_data, VkFence fence);
+ void StoreMemRanges(layer_data* dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size);
+ bool ValidateIdleDescriptorSet(const layer_data* dev_data, VkDescriptorSet set, const char* func_str);
+ void InitializeAndTrackMemory(layer_data* dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void** ppData);
+ bool ValidatePipelineLocked(layer_data* dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const& pPipelines,
+ int pipelineIndex);
+ bool ValidatePipelineUnlocked(layer_data* dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const& pPipelines,
+ int pipelineIndex);
+ void FreeDescriptorSet(layer_data* dev_data, cvdescriptorset::DescriptorSet* descriptor_set);
+ void DeletePools(layer_data* dev_data);
+ bool ValidImageBufferQueue(layer_data* dev_data, GLOBAL_CB_NODE* cb_node, const VK_OBJECT* object, VkQueue queue,
+ uint32_t count, const uint32_t* indices);
+ bool ValidateFenceForSubmit(layer_data* dev_data, FENCE_NODE* pFence);
+ void AddMemObjInfo(layer_data* dev_data, void* object, const VkDeviceMemory mem, const VkMemoryAllocateInfo* pAllocateInfo);
+ bool ValidateStatus(layer_data* dev_data, GLOBAL_CB_NODE* pNode, CBStatusFlags status_mask, VkFlags msg_flags,
+ const char* fail_msg, const char* msg_code);
+ bool ValidateDrawStateFlags(layer_data* dev_data, GLOBAL_CB_NODE* pCB, const PIPELINE_STATE* pPipe, bool indexed,
+ const char* msg_code);
+ bool LogInvalidAttachmentMessage(layer_data const* dev_data, const char* type1_string, const RENDER_PASS_STATE* rp1_state,
+ const char* type2_string, const RENDER_PASS_STATE* rp2_state, uint32_t primary_attach,
+ uint32_t secondary_attach, const char* msg, const char* caller, const char* error_code);
+ bool ValidateAttachmentCompatibility(layer_data const* dev_data, const char* type1_string, const RENDER_PASS_STATE* rp1_state,
+ const char* type2_string, const RENDER_PASS_STATE* rp2_state, uint32_t primary_attach,
+ uint32_t secondary_attach, const char* caller, const char* error_code);
+ bool ValidateSubpassCompatibility(layer_data const* dev_data, const char* type1_string, const RENDER_PASS_STATE* rp1_state,
+ const char* type2_string, const RENDER_PASS_STATE* rp2_state, const int subpass,
+ const char* caller, const char* error_code);
+ bool ValidateRenderPassCompatibility(layer_data const* dev_data, const char* type1_string, const RENDER_PASS_STATE* rp1_state,
+ const char* type2_string, const RENDER_PASS_STATE* rp2_state, const char* caller,
+ const char* error_code);
+ void UpdateDrawState(layer_data* dev_data, GLOBAL_CB_NODE* cb_state, const VkPipelineBindPoint bind_point);
+ bool ReportInvalidCommandBuffer(layer_data* dev_data, const GLOBAL_CB_NODE* cb_state, const char* call_source);
+ void InitGpuValidation(layer_data* instance_data);
-VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+ bool ValidatePipelineVertexDivisors(layer_data* dev_data, std::vector<std::unique_ptr<PIPELINE_STATE>> const& pipe_state_vec,
+ const uint32_t count, const VkGraphicsPipelineCreateInfo* pipe_cis);
+ void AddFramebufferBinding(layer_data* dev_data, GLOBAL_CB_NODE* cb_state, FRAMEBUFFER_STATE* fb_state);
+ bool ValidateImageBarrierImage(layer_data* device_data, const char* funcName, GLOBAL_CB_NODE const* cb_state,
+ VkFramebuffer framebuffer, uint32_t active_subpass,
+ const safe_VkSubpassDescription2KHR& sub_desc, uint64_t rp_handle, uint32_t img_index,
+ const VkImageMemoryBarrier& img_barrier);
+ void RecordCmdBeginRenderPassState(layer_data* device_data, VkCommandBuffer commandBuffer,
+ const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassContents contents);
+ bool ValidateCmdBeginRenderPass(layer_data* device_data, VkCommandBuffer commandBuffer, RenderPassCreateVersion rp_version,
+ const VkRenderPassBeginInfo* pRenderPassBegin);
+ bool ValidateDependencies(layer_data* dev_data, FRAMEBUFFER_STATE const* framebuffer, RENDER_PASS_STATE const* renderPass);
+ bool ValidateBarriers(layer_data* device_data, const char* funcName, GLOBAL_CB_NODE* cb_state,
+ VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
+ const VkMemoryBarrier* pMemBarriers, uint32_t bufferBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemBarriers, uint32_t imageMemBarrierCount,
+ const VkImageMemoryBarrier* pImageMemBarriers);
+ bool ValidateCreateSwapchain(layer_data* device_data, const char* func_name, VkSwapchainCreateInfoKHR const* pCreateInfo,
+ SURFACE_STATE* surface_state, SWAPCHAIN_NODE* old_swapchain_state);
+ void RecordCmdPushDescriptorSetState(layer_data* device_data, GLOBAL_CB_NODE* cb_state, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites);
+ bool ValidatePipelineBindPoint(layer_data* device_data, GLOBAL_CB_NODE* cb_state, VkPipelineBindPoint bind_point,
+ const char* func_name, const std::map<VkPipelineBindPoint, std::string>& bind_errors);
+ bool ValidateMemoryIsMapped(layer_data* dev_data, const char* funcName, uint32_t memRangeCount,
+ const VkMappedMemoryRange* pMemRanges);
+ bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data* dev_data, uint32_t mem_range_count,
+ const VkMappedMemoryRange* mem_ranges);
+ void CopyNoncoherentMemoryFromDriver(layer_data* dev_data, uint32_t mem_range_count, const VkMappedMemoryRange* mem_ranges);
+ bool ValidateMappedMemoryRangeDeviceLimits(layer_data* dev_data, const char* func_name, uint32_t mem_range_count,
+ const VkMappedMemoryRange* mem_ranges);
+ BarrierOperationsType ComputeBarrierOperationsType(layer_data* device_data, GLOBAL_CB_NODE* cb_state,
+ uint32_t buffer_barrier_count, const VkBufferMemoryBarrier* buffer_barriers,
+ uint32_t image_barrier_count, const VkImageMemoryBarrier* image_barriers);
+ bool ValidateStageMasksAgainstQueueCapabilities(layer_data* dev_data, GLOBAL_CB_NODE const* cb_state,
+ VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
+ BarrierOperationsType barrier_op_type, const char* function,
+ const char* error_code);
+ bool SetEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+ bool ValidateRenderPassImageBarriers(layer_data* device_data, const char* funcName, GLOBAL_CB_NODE* cb_state,
+ uint32_t active_subpass, const safe_VkSubpassDescription2KHR& sub_desc, uint64_t rp_handle,
+ const safe_VkSubpassDependency2KHR* dependencies,
+ const std::vector<uint32_t>& self_dependencies, uint32_t image_mem_barrier_count,
+ const VkImageMemoryBarrier* image_barriers);
+ bool ValidateSecondaryCommandBufferState(layer_data* dev_data, GLOBAL_CB_NODE* pCB, GLOBAL_CB_NODE* pSubCB);
+ bool ValidateFramebuffer(layer_data* dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE* pCB,
+ VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE* pSubCB, const char* caller);
+ bool ValidateDescriptorUpdateTemplate(const char* func_name, layer_data* device_data,
+ const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo);
+ bool ValidateCreateSamplerYcbcrConversion(const layer_data* device_data, const char* func_name,
+ const VkSamplerYcbcrConversionCreateInfo* create_info);
+ void RecordCreateSamplerYcbcrConversionState(layer_data* device_data, const VkSamplerYcbcrConversionCreateInfo* create_info,
+ VkSamplerYcbcrConversion ycbcr_conversion);
+ bool ValidateImportFence(layer_data* device_data, VkFence fence, const char* caller_name);
+ void RecordImportFenceState(layer_data* device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type,
+ VkFenceImportFlagsKHR flags);
+ void RecordGetExternalFenceState(layer_data* device_data, VkFence fence, VkExternalFenceHandleTypeFlagBitsKHR handle_type);
+ bool ValidateAcquireNextImage(layer_data* device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex, const char* func_name);
+ void RecordAcquireNextImageState(layer_data* device_data, VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
+ VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
+ bool VerifyRenderAreaBounds(const layer_data* dev_data, const VkRenderPassBeginInfo* pRenderPassBegin);
+ bool ValidatePrimaryCommandBuffer(const layer_data* dev_data, const GLOBAL_CB_NODE* pCB, char const* cmd_name,
+ const char* error_code);
+ void RecordCmdNextSubpass(layer_data* device_data, VkCommandBuffer commandBuffer, VkSubpassContents contents);
+ bool ValidateCmdEndRenderPass(layer_data* device_data, RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer);
+ void RecordCmdEndRenderPassState(layer_data* device_data, VkCommandBuffer commandBuffer);
+ bool ValidateFramebufferCreateInfo(layer_data* dev_data, const VkFramebufferCreateInfo* pCreateInfo);
+ bool MatchUsage(layer_data* dev_data, uint32_t count, const VkAttachmentReference2KHR* attachments,
+ const VkFramebufferCreateInfo* fbci, VkImageUsageFlagBits usage_flag, const char* error_code);
+ bool CheckDependencyExists(const layer_data* dev_data, const uint32_t subpass, const std::vector<uint32_t>& dependent_subpasses,
+ const std::vector<DAGNode>& subpass_to_node, bool& skip);
+ bool CheckPreserved(const layer_data* dev_data, const VkRenderPassCreateInfo2KHR* pCreateInfo, const int index,
+ const uint32_t attachment, const std::vector<DAGNode>& subpass_to_node, int depth, bool& skip);
+ bool ValidateBindImageMemory(layer_data* device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ const char* api_name);
+ void UpdateBindImageMemoryState(layer_data* device_data, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset);
+ void RecordGetPhysicalDeviceDisplayPlanePropertiesState(instance_layer_data* instance_data, VkPhysicalDevice physicalDevice,
+ uint32_t* pPropertyCount, void* pProperties);
+ bool ValidateGetPhysicalDeviceDisplayPlanePropertiesKHRQuery(instance_layer_data* instance_data,
+ VkPhysicalDevice physicalDevice, uint32_t planeIndex,
+ const char* api_name);
+ bool ValidateQuery(VkQueue queue, GLOBAL_CB_NODE* pCB, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount);
+ bool IsQueryInvalid(layer_data* dev_data, QUEUE_STATE* queue_data, VkQueryPool queryPool, uint32_t queryIndex);
+ bool ValidateImportSemaphore(layer_data* device_data, VkSemaphore semaphore, const char* caller_name);
+ void RecordImportSemaphoreState(layer_data* device_data, VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type, VkSemaphoreImportFlagsKHR flags);
+ void RecordGetExternalSemaphoreState(layer_data* device_data, VkSemaphore semaphore,
+ VkExternalSemaphoreHandleTypeFlagBitsKHR handle_type);
+ bool SetQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value);
+ bool ValidateCmdDrawType(layer_data* dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
+ CMD_TYPE cmd_type, const char* caller, VkQueueFlags queue_flags, const char* queue_flag_code,
+ const char* renderpass_msg_code, const char* pipebound_msg_code, const char* dynamic_state_msg_code);
+ void UpdateStateCmdDrawDispatchType(layer_data* dev_data, GLOBAL_CB_NODE* cb_state, VkPipelineBindPoint bind_point);
+ void UpdateStateCmdDrawType(layer_data* dev_data, GLOBAL_CB_NODE* cb_state, VkPipelineBindPoint bind_point);
+ bool ValidateCmdNextSubpass(layer_data* device_data, RenderPassCreateVersion rp_version, VkCommandBuffer commandBuffer);
+ bool RangesIntersect(layer_data const* dev_data, MEMORY_RANGE const* range1, VkDeviceSize offset, VkDeviceSize end);
+ bool RangesIntersect(layer_data const* dev_data, MEMORY_RANGE const* range1, MEMORY_RANGE const* range2, bool* skip,
+ bool skip_checks);
+ bool ValidateInsertMemoryRange(layer_data const* dev_data, uint64_t handle, DEVICE_MEM_INFO* mem_info,
+ VkDeviceSize memoryOffset, VkMemoryRequirements memRequirements, bool is_image, bool is_linear,
+ const char* api_name);
+ void InsertMemoryRange(layer_data const* dev_data, uint64_t handle, DEVICE_MEM_INFO* mem_info, VkDeviceSize memoryOffset,
+ VkMemoryRequirements memRequirements, bool is_image, bool is_linear);
+ bool ValidateInsertImageMemoryRange(layer_data const* dev_data, VkImage image, DEVICE_MEM_INFO* mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, bool is_linear,
+ const char* api_name);
+ void InsertImageMemoryRange(layer_data const* dev_data, VkImage image, DEVICE_MEM_INFO* mem_info, VkDeviceSize mem_offset,
+ VkMemoryRequirements mem_reqs, bool is_linear);
+ bool ValidateInsertBufferMemoryRange(layer_data const* dev_data, VkBuffer buffer, DEVICE_MEM_INFO* mem_info,
+ VkDeviceSize mem_offset, VkMemoryRequirements mem_reqs, const char* api_name);
+ void InsertBufferMemoryRange(layer_data const* dev_data, VkBuffer buffer, DEVICE_MEM_INFO* mem_info, VkDeviceSize mem_offset,
+ VkMemoryRequirements mem_reqs);
+ bool ValidateMemoryTypes(const layer_data* dev_data, const DEVICE_MEM_INFO* mem_info, const uint32_t memory_type_bits,
+ const char* funcName, const char* msgCode);
+ bool ValidateCommandBufferState(layer_data* dev_data, GLOBAL_CB_NODE* cb_state, const char* call_source,
+ int current_submit_count, const char* vu_id);
+ bool ValidateCommandBufferSimultaneousUse(layer_data* dev_data, GLOBAL_CB_NODE* pCB, int current_submit_count);
+ bool ValidateGetDeviceQueue(layer_data* device_data, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue,
+ const char* valid_qfi_vuid, const char* qfi_in_range_vuid);
+ void RecordGetDeviceQueueState(layer_data* device_data, uint32_t queue_family_index, VkQueue queue);
+ bool ValidateRenderpassAttachmentUsage(const layer_data* dev_data, RenderPassCreateVersion rp_version,
+ const VkRenderPassCreateInfo2KHR* pCreateInfo);
+ bool AddAttachmentUse(const layer_data* dev_data, RenderPassCreateVersion rp_version, uint32_t subpass,
+ std::vector<uint8_t>& attachment_uses, std::vector<VkImageLayout>& attachment_layouts,
+ uint32_t attachment, uint8_t new_use, VkImageLayout new_layout);
+ bool ValidateAttachmentIndex(const layer_data* dev_data, RenderPassCreateVersion rp_version, uint32_t attachment,
+ uint32_t attachment_count, const char* type);
+ bool ValidateCreateRenderPass(layer_data* dev_data, VkDevice device, RenderPassCreateVersion rp_version,
+ const VkRenderPassCreateInfo2KHR* pCreateInfo, RENDER_PASS_STATE* render_pass);
+ bool ValidateRenderPassPipelineBarriers(layer_data* device_data, const char* funcName, GLOBAL_CB_NODE* cb_state,
+ VkPipelineStageFlags src_stage_mask, VkPipelineStageFlags dst_stage_mask,
+ VkDependencyFlags dependency_flags, uint32_t mem_barrier_count,
+ const VkMemoryBarrier* mem_barriers, uint32_t buffer_mem_barrier_count,
+ const VkBufferMemoryBarrier* buffer_mem_barriers, uint32_t image_mem_barrier_count,
+ const VkImageMemoryBarrier* image_barriers);
+ bool CheckStageMaskQueueCompatibility(layer_data* dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
+ VkQueueFlags queue_flags, const char* function, const char* src_or_dest,
+ const char* error_code);
+ void RecordUpdateDescriptorSetWithTemplateState(layer_data* device_data, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
+ bool ValidateUpdateDescriptorSetWithTemplate(layer_data* device_data, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
+ bool ValidateMemoryIsBoundToBuffer(const layer_data*, const BUFFER_STATE*, const char*, const char*);
+ bool ValidateMemoryIsBoundToImage(const layer_data*, const IMAGE_STATE*, const char*, const char*);
+ void AddCommandBufferBindingSampler(GLOBAL_CB_NODE*, SAMPLER_STATE*);
+ void AddCommandBufferBindingImage(const layer_data*, GLOBAL_CB_NODE*, IMAGE_STATE*);
+ void AddCommandBufferBindingImageView(const layer_data*, GLOBAL_CB_NODE*, IMAGE_VIEW_STATE*);
+ void AddCommandBufferBindingBuffer(const layer_data*, GLOBAL_CB_NODE*, BUFFER_STATE*);
+ void AddCommandBufferBindingBufferView(const layer_data*, GLOBAL_CB_NODE*, BUFFER_VIEW_STATE*);
+ bool ValidateObjectNotInUse(const layer_data* dev_data, BASE_NODE* obj_node, VK_OBJECT obj_struct, const char* caller_name,
+ const char* error_code);
+ void InvalidateCommandBuffers(const layer_data* dev_data, std::unordered_set<GLOBAL_CB_NODE*> const& cb_nodes, VK_OBJECT obj);
+ void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO* mem_info);
+ void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO* mem_info);
+ void ClearMemoryObjectBindings(layer_data* dev_data, uint64_t handle, VulkanObjectType type);
+ bool ValidateCmdQueueFlags(layer_data* dev_data, const GLOBAL_CB_NODE* cb_node, const char* caller_name, VkQueueFlags flags,
+ const char* error_code);
+ bool InsideRenderPass(const layer_data* my_data, const GLOBAL_CB_NODE* pCB, const char* apiName, const char* msgCode);
+ bool OutsideRenderPass(const layer_data* my_data, GLOBAL_CB_NODE* pCB, const char* apiName, const char* msgCode);
-VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount,
- VkPhysicalDevice* pPhysicalDevices);
+ void SetLayout(layer_data* device_data, GLOBAL_CB_NODE* pCB, ImageSubresourcePair imgpair, const VkImageLayout& layout);
+ void SetLayout(layer_data* device_data, GLOBAL_CB_NODE* pCB, ImageSubresourcePair imgpair,
+ const IMAGE_CMD_BUF_LAYOUT_NODE& node);
+ void SetLayout(std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE>& imageLayoutMap, ImageSubresourcePair imgpair,
+ VkImageLayout layout);
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+ bool ValidateImageSampleCount(layer_data* dev_data, IMAGE_STATE* image_state, VkSampleCountFlagBits sample_count,
+ const char* location, const std::string& msgCode);
+ bool ValidateCmdSubpassState(const layer_data* dev_data, const GLOBAL_CB_NODE* pCB, const CMD_TYPE cmd_type);
+ bool ValidateCmd(layer_data* dev_data, const GLOBAL_CB_NODE* cb_state, const CMD_TYPE cmd, const char* caller_name);
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
- VkFormatProperties* pFormatProperties);
+ // Prototypes for layer_data accessor functions. These should be in their own header file at some point
+ VkFormatProperties GetPDFormatProperties(const layer_data* device_data, const VkFormat format);
+ VkResult GetPDImageFormatProperties(layer_data*, const VkImageCreateInfo*, VkImageFormatProperties*);
+ VkResult GetPDImageFormatProperties2(layer_data*, const VkPhysicalDeviceImageFormatInfo2*, VkImageFormatProperties2*);
+ const debug_report_data* GetReportData(const layer_data*);
+ const VkLayerDispatchTable* GetDispatchTable(const layer_data*);
+ const VkPhysicalDeviceProperties* GetPDProperties(const layer_data*);
+ const VkPhysicalDeviceMemoryProperties* GetPhysicalDeviceMemoryProperties(const layer_data*);
+ const CHECK_DISABLED* GetDisables(layer_data*);
+ const CHECK_ENABLED* GetEnables(layer_data*);
+ std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>>* GetImageMap(layer_data*);
+ std::unordered_map<VkImage, std::vector<ImageSubresourcePair>>* GetImageSubresourceMap(layer_data*);
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE>* GetImageLayoutMap(layer_data*);
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const* GetImageLayoutMap(layer_data const*);
+ std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>>* GetBufferMap(layer_data* device_data);
+ std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>>* GetBufferViewMap(layer_data* device_data);
+ std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>>* GetImageViewMap(layer_data* device_data);
+ std::unordered_map<VkSamplerYcbcrConversion, uint64_t>* GetYcbcrConversionFormatMap(layer_data*);
+ std::unordered_set<uint64_t>* GetAHBExternalFormatsSet(layer_data*);
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
- VkImageType type, VkImageTiling tiling,
- VkImageUsageFlags usage, VkImageCreateFlags flags,
- VkImageFormatProperties* pImageFormatProperties);
+ const DeviceExtensions* GetDeviceExtensions(const layer_data*);
+ GpuValidationState* GetGpuValidationState(layer_data*);
+ const GpuValidationState* GetGpuValidationState(const layer_data*);
+ VkDevice GetDevice(const layer_data*);
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+ uint32_t GetApiVersion(const layer_data*);
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
- uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties* pQueueFamilyProperties);
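+ // Tag-dispatched accessors that select the device-global queue family ownership (QFO)
+ // release barrier map for either image or buffer memory barriers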
+ GlobalQFOTransferBarrierMap<VkImageMemoryBarrier>& GetGlobalQFOReleaseBarrierMap(
+ layer_data* dev_data, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag& type_tag);
+ GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier>& GetGlobalQFOReleaseBarrierMap(
+ layer_data* dev_data, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag& type_tag);
+ template <typename Barrier>
+ void RecordQueuedQFOTransferBarriers(layer_data* device_data, GLOBAL_CB_NODE* cb_state);
+ template <typename Barrier>
+ bool ValidateQueuedQFOTransferBarriers(layer_data* device_data, GLOBAL_CB_NODE* cb_state,
+ QFOTransferCBScoreboards<Barrier>* scoreboards);
+ bool ValidateQueuedQFOTransfers(layer_data* device_data, GLOBAL_CB_NODE* cb_state,
+ QFOTransferCBScoreboards<VkImageMemoryBarrier>* qfo_image_scoreboards,
+ QFOTransferCBScoreboards<VkBufferMemoryBarrier>* qfo_buffer_scoreboards);
+ template <typename BarrierRecord, typename Scoreboard>
+ bool ValidateAndUpdateQFOScoreboard(const debug_report_data* report_data, const GLOBAL_CB_NODE* cb_state, const char* operation,
+ const BarrierRecord& barrier, Scoreboard* scoreboard);
+ template <typename Barrier>
+ void RecordQFOTransferBarriers(layer_data* device_data, GLOBAL_CB_NODE* cb_state, uint32_t barrier_count,
+ const Barrier* barriers);
+ void RecordBarriersQFOTransfers(layer_data* device_data, GLOBAL_CB_NODE* cb_state, uint32_t bufferBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemBarriers, uint32_t imageMemBarrierCount,
+ const VkImageMemoryBarrier* pImageMemBarriers);
+ template <typename Barrier>
+ bool ValidateQFOTransferBarrierUniqueness(layer_data* device_data, const char* func_name, GLOBAL_CB_NODE* cb_state,
+ uint32_t barrier_count, const Barrier* barriers);
+ bool IsReleaseOp(layer_data* device_data, GLOBAL_CB_NODE* cb_state, VkImageMemoryBarrier const* barrier);
+ bool ValidateBarriersQFOTransferUniqueness(layer_data* device_data, const char* func_name, GLOBAL_CB_NODE* cb_state,
+ uint32_t bufferBarrierCount, const VkBufferMemoryBarrier* pBufferMemBarriers,
+ uint32_t imageMemBarrierCount, const VkImageMemoryBarrier* pImageMemBarriers);
+ bool ValidatePrimaryCommandBufferState(layer_data* dev_data, GLOBAL_CB_NODE* pCB, int current_submit_count,
+ QFOTransferCBScoreboards<VkImageMemoryBarrier>* qfo_image_scoreboards,
+ QFOTransferCBScoreboards<VkBufferMemoryBarrier>* qfo_buffer_scoreboards);
+ bool ValidatePipelineDrawtimeState(layer_data const* dev_data, LAST_BOUND_STATE const& state, const GLOBAL_CB_NODE* pCB,
+ CMD_TYPE cmd_type, PIPELINE_STATE const* pPipeline, const char* caller);
+ bool ValidateCmdBufDrawState(layer_data* dev_data, GLOBAL_CB_NODE* cb_node, CMD_TYPE cmd_type, const bool indexed,
+ const VkPipelineBindPoint bind_point, const char* function, const char* pipe_err_code,
+ const char* state_err_code);
+ void IncrementBoundObjects(layer_data* dev_data, GLOBAL_CB_NODE const* cb_node);
+ void IncrementResources(layer_data* dev_data, GLOBAL_CB_NODE* cb_node);
+ bool ValidateEventStageMask(VkQueue queue, GLOBAL_CB_NODE* pCB, uint32_t eventCount, size_t firstEventIndex,
+ VkPipelineStageFlags sourceStageMask);
+ void RetireWorkOnQueue(layer_data* dev_data, QUEUE_STATE* pQueue, uint64_t seq);
+ bool ValidateResources(layer_data* dev_data, GLOBAL_CB_NODE* cb_node);
+ bool ValidateQueueFamilyIndices(layer_data* dev_data, GLOBAL_CB_NODE* pCB, VkQueue queue);
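+ // VK_EXT_validation_cache entry points handled directly by the core checks object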
+ VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache);
+ void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
+ const VkAllocationCallbacks* pAllocator);
+ VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
+ const VkValidationCacheEXT* pSrcCaches);
+ VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize,
+ void* pData);
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+ // Descriptor Set Validation Functions
+ bool ValidateUpdateDescriptorSetsWithTemplateKHR(layer_data* device_data, VkDescriptorSet descriptorSet,
+ const TEMPLATE_STATE* template_state, const void* pData);
+ void PerformUpdateDescriptorSetsWithTemplateKHR(layer_data* device_data, VkDescriptorSet descriptorSet,
+ const TEMPLATE_STATE* template_state, const void* pData);
+ void UpdateAllocateDescriptorSetsData(const layer_data* dev_data, const VkDescriptorSetAllocateInfo*,
+ cvdescriptorset::AllocateDescriptorSetsData*);
+ bool ValidateAllocateDescriptorSets(const layer_data*, const VkDescriptorSetAllocateInfo*,
+ const cvdescriptorset::AllocateDescriptorSetsData*);
+ void PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo*, const VkDescriptorSet*,
+ const cvdescriptorset::AllocateDescriptorSetsData*,
+ std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE*>*,
+ std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet*>*, layer_data*);
+ bool ValidateUpdateDescriptorSets(const debug_report_data* report_data, const layer_data* dev_data, uint32_t write_count,
+ const VkWriteDescriptorSet* p_wds, uint32_t copy_count, const VkCopyDescriptorSet* p_cds,
+ const char* func_name);
-VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char* pName);
+ // Shader validation functions, moved here from shader_validation
+ bool ValidateAndCapturePipelineShaderState(layer_data* dev_data, PIPELINE_STATE* pPipeline);
+ bool ValidateComputePipeline(layer_data* dev_data, PIPELINE_STATE* pPipeline);
+ bool ValidateRayTracingPipelineNV(layer_data* dev_data, PIPELINE_STATE* pipeline);
+ bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+ void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state);
+ void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, VkResult result,
+ void* csm_state);
+ bool ValidatePipelineShaderStage(layer_data* dev_data, VkPipelineShaderStageCreateInfo const* pStage, PIPELINE_STATE* pipeline,
+ shader_module const** out_module, spirv_inst_iter* out_entrypoint, bool check_point_size);
+ bool ValidatePointListShaderState(const layer_data* dev_data, const PIPELINE_STATE* pipeline, shader_module const* src,
+ spirv_inst_iter entrypoint, VkShaderStageFlagBits stage);
+ bool ValidateShaderCapabilities(layer_data* dev_data, shader_module const* src, VkShaderStageFlagBits stage,
+ bool has_writable_descriptor);
+ bool ValidateShaderStageInputOutputLimits(layer_data* dev_data, shader_module const* src,
+ VkPipelineShaderStageCreateInfo const* pStage, PIPELINE_STATE* pipeline);
-VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char* pName);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char* pLayerName, uint32_t* pPropertyCount,
- VkExtensionProperties* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char* pLayerName,
- uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkLayerProperties* pProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
-
-VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
-
-VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue);
-
-VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device);
-
-VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
- const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
-
-VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL MapMemory(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size,
- VkMemoryMapFlags flags, void** ppData);
-
-VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory memory);
-
-VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
- const VkMappedMemoryRange* pMemoryRanges);
-
-VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memoryRangeCount,
- const VkMappedMemoryRange* pMemoryRanges);
-
-VKAPI_ATTR void VKAPI_CALL GetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
-
-VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, VkFormat format,
- VkImageType type, VkSampleCountFlagBits samples,
- VkImageUsageFlags usage, VkImageTiling tiling,
- uint32_t* pPropertyCount,
- VkSparseImageFormatProperties* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo,
- VkFence fence);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkFence* pFence);
-
-VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence);
-
-VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll,
- uint64_t timeout);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
-
-VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
-
-VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetEventStatus(VkDevice device, VkEvent event);
-
-VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event);
-
-VKAPI_ATTR VkResult VKAPI_CALL ResetEvent(VkDevice device, VkEvent event);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
-
-VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
-
-VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
-
-VKAPI_ATTR void VKAPI_CALL DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkImage* pImage);
-
-VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL GetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource* pSubresource,
- VkSubresourceLayout* pLayout);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkImageView* pView);
-
-VKAPI_ATTR void VKAPI_CALL DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
-
-VKAPI_ATTR void VKAPI_CALL DestroyShaderModule(VkDevice device, VkShaderModule shaderModule,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
-
-VKAPI_ATTR void VKAPI_CALL DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
-
-VKAPI_ATTR VkResult VKAPI_CALL MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount,
- const VkPipelineCache* pSrcCaches);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
- const VkGraphicsPipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
- const VkComputePipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
-
-VKAPI_ATTR void VKAPI_CALL DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
-
-VKAPI_ATTR void VKAPI_CALL DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
-
-VKAPI_ATTR void VKAPI_CALL DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorSetLayout* pSetLayout);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
- VkDescriptorPoolResetFlags flags);
-
-VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
- VkDescriptorSet* pDescriptorSets);
-
-VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
- const VkDescriptorSet* pDescriptorSets);
-
-VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet* pDescriptorCopies);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
-
-VKAPI_ATTR void VKAPI_CALL DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
-
-VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL GetRenderAreaGranularity(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
-
-VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
-
-VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo,
- VkCommandBuffer* pCommandBuffers);
-
-VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers);
-
-VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer);
-
-VKAPI_ATTR VkResult VKAPI_CALL ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipeline pipeline);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewport* pViewports);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
- const VkRect2D* pScissors);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
- float depthBiasSlopeFactor);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask,
- uint32_t compareMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount,
- const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
- const uint32_t* pDynamicOffsets);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkIndexType indexType);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
- const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
-
-VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
- uint32_t firstVertex, uint32_t firstInstance);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
- uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount,
- uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY,
- uint32_t groupCountZ);
-
-VKAPI_ATTR void VKAPI_CALL CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
- uint32_t regionCount, const VkBufferCopy* pRegions);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
- const VkImageCopy* pRegions);
-
-VKAPI_ATTR void VKAPI_CALL CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
- const VkImageBlit* pRegions, VkFilter filter);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
- VkImageLayout dstImageLayout, uint32_t regionCount,
- const VkBufferImageCopy* pRegions);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
-
-VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize dataSize, const void* pData);
-
-VKAPI_ATTR void VKAPI_CALL CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize size, uint32_t data);
-
-VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearColorValue* pColor, uint32_t rangeCount,
- const VkImageSubresourceRange* pRanges);
-
-VKAPI_ATTR void VKAPI_CALL CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
- const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount,
- const VkImageSubresourceRange* pRanges);
-
-VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
- const VkClearAttachment* pAttachments, uint32_t rectCount,
- const VkClearRect* pRects);
-
-VKAPI_ATTR void VKAPI_CALL CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
- VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
- const VkImageResolve* pRegions);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
- VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-
-VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
- VkQueryControlFlags flags);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query);
-
-VKAPI_ATTR void VKAPI_CALL CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount);
-
-VKAPI_ATTR void VKAPI_CALL CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
- VkQueryPool queryPool, uint32_t query);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize stride, VkQueryResultFlags flags);
-
-VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
- uint32_t offset, uint32_t size, const void* pValues);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents);
-
-VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer);
-
-VKAPI_ATTR void VKAPI_CALL CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
-
-VKAPI_ATTR void VKAPI_CALL GetDeviceGroupPeerMemoryFeatures(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex,
- uint32_t remoteDeviceIndex,
- VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY,
- uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format,
- VkFormatProperties2* pFormatProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
- uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2* pQueueFamilyProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMemoryProperties2(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo,
- uint32_t* pPropertyCount,
- VkSparseImageFormatProperties2* pProperties);
-
-VKAPI_ATTR void VKAPI_CALL TrimCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
-
-VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSamplerYcbcrConversion* pYcbcrConversion);
-
-VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplate(VkDevice device,
- const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalBufferProperties(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
- VkExternalBufferProperties* pExternalBufferProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalFenceProperties(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
- VkExternalFenceProperties* pExternalFenceProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalSemaphoreProperties(
- VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
- VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupport(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- VkDescriptorSetLayoutSupport* pSupport);
-
-VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- VkSurfaceKHR surface, VkBool32* pSupported);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilitiesKHR* pSurfaceCapabilities);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pSurfaceFormatCount,
- VkSurfaceFormatKHR* pSurfaceFormats);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pPresentModeCount,
- VkPresentModeKHR* pPresentModes);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
-
-VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
- VkImage* pSwapchainImages);
-
-VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
- VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex);
-
-VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL
-GetDeviceGroupPresentCapabilitiesKHR(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDeviceGroupSurfacePresentModesKHR(VkDevice device, VkSurfaceKHR surface,
- VkDeviceGroupPresentModeFlagsKHR* pModes);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDevicePresentRectanglesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pRectCount, VkRect2D* pRects);
-
-VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo,
- uint32_t* pImageIndex);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkDisplayPropertiesKHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkDisplayPlanePropertiesKHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
- uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
- uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayModeKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
- const VkDisplayModeCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
- uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
- const VkSwapchainCreateInfoKHR* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
-
-#ifdef VK_USE_PLATFORM_XLIB_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-
-VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex, Display* dpy,
- VisualID visualID);
-#endif // VK_USE_PLATFORM_XLIB_KHR
-
-#ifdef VK_USE_PLATFORM_XCB_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-
-VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex, xcb_connection_t* connection,
- xcb_visualid_t visual_id);
-#endif // VK_USE_PLATFORM_XCB_KHR
-
-#ifdef VK_USE_PLATFORM_WAYLAND_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-
-VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- struct wl_display* display);
-#endif // VK_USE_PLATFORM_WAYLAND_KHR
-
-#ifdef VK_USE_PLATFORM_ANDROID_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-#endif // VK_USE_PLATFORM_ANDROID_KHR
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-
-VKAPI_ATTR VkBool32 VKAPI_CALL GetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFeatures2KHR(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceProperties2KHR(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties2* pProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceFormatProperties2KHR(VkPhysicalDevice physicalDevice, VkFormat format,
- VkFormatProperties2* pFormatProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
- uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2* pQueueFamilyProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMemoryProperties2KHR(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceSparseImageFormatProperties2KHR(
- VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount,
- VkSparseImageFormatProperties2* pProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetDeviceGroupPeerMemoryFeaturesKHR(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex,
- uint32_t remoteDeviceIndex,
- VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetDeviceMaskKHR(VkCommandBuffer commandBuffer, uint32_t deviceMask);
-
-VKAPI_ATTR void VKAPI_CALL CmdDispatchBaseKHR(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY,
- uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY,
- uint32_t groupCountZ);
-
-VKAPI_ATTR void VKAPI_CALL TrimCommandPoolKHR(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
-
-VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalBufferPropertiesKHR(
- VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo,
- VkExternalBufferProperties* pExternalBufferProperties);
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandleKHR(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
- HANDLE* pHandle);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandlePropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
- HANDLE handle,
- VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryFdKHR(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryFdPropertiesKHR(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd,
- VkMemoryFdPropertiesKHR* pMemoryFdProperties);
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalSemaphorePropertiesKHR(
- VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo,
- VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL
-ImportSemaphoreWin32HandleKHR(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreWin32HandleKHR(VkDevice device,
- const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
- HANDLE* pHandle);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL ImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd);
-
-VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites);
-
-VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate,
- VkPipelineLayout layout, uint32_t set, const void* pData);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
- const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplate descriptorUpdateTemplate,
- const void* pData);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
- const VkSubpassEndInfoKHR* pSubpassEndInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainStatusKHR(VkDevice device, VkSwapchainKHR swapchain);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceExternalFencePropertiesKHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo,
- VkExternalFenceProperties* pExternalFenceProperties);
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL ImportFenceWin32HandleKHR(VkDevice device,
- const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
- HANDLE* pHandle);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL ImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
- VkSurfaceCapabilities2KHR* pSurfaceCapabilities);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
- uint32_t* pSurfaceFormatCount,
- VkSurfaceFormat2KHR* pSurfaceFormats);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkDisplayProperties2KHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice,
- uint32_t* pPropertyCount,
- VkDisplayPlaneProperties2KHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModeProperties2KHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
- uint32_t* pPropertyCount, VkDisplayModeProperties2KHR* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
- VkDisplayPlaneCapabilities2KHR* pCapabilities);
-
-VKAPI_ATTR void VKAPI_CALL GetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-
-VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversionKHR(VkDevice device,
- const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSamplerYcbcrConversion* pYcbcrConversion);
-
-VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount,
- const VkBindBufferMemoryInfo* pBindInfos);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount,
- const VkBindImageMemoryInfo* pBindInfos);
-
-VKAPI_ATTR void VKAPI_CALL GetDescriptorSetLayoutSupportKHR(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- VkDescriptorSetLayoutSupport* pSupport);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount, uint32_t stride);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
- const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugReportCallbackEXT* pCallback);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
- VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location,
- int32_t messageCode, const char* pLayerPrefix, const char* pMessage);
-
-VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer);
-
-VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindTransformFeedbackBuffersEXT(VkCommandBuffer commandBuffer, uint32_t firstBinding,
- uint32_t bindingCount, const VkBuffer* pBuffers,
- const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
- uint32_t counterBufferCount, const VkBuffer* pCounterBuffers,
- const VkDeviceSize* pCounterBufferOffsets);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndTransformFeedbackEXT(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer,
- uint32_t counterBufferCount, const VkBuffer* pCounterBuffers,
- const VkDeviceSize* pCounterBufferOffsets);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
- VkQueryControlFlags flags, uint32_t index);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndQueryIndexedEXT(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query,
- uint32_t index);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectByteCountEXT(VkCommandBuffer commandBuffer, uint32_t instanceCount,
- uint32_t firstInstance, VkBuffer counterBuffer,
- VkDeviceSize counterBufferOffset, uint32_t counterOffset,
- uint32_t vertexStride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountAMD(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount, uint32_t stride);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetShaderInfoAMD(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage,
- VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceExternalImageFormatPropertiesNV(
- VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage,
- VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType,
- VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties);
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryWin32HandleNV(VkDevice device, VkDeviceMemory memory,
- VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-#endif // VK_USE_PLATFORM_WIN32_KHR
-
-#ifdef VK_USE_PLATFORM_VI_NN
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateViSurfaceNN(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-#endif // VK_USE_PLATFORM_VI_NN
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginConditionalRenderingEXT(VkCommandBuffer commandBuffer,
- const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndConditionalRenderingEXT(VkCommandBuffer commandBuffer);
-
-VKAPI_ATTR void VKAPI_CALL CmdProcessCommandsNVX(VkCommandBuffer commandBuffer,
- const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdReserveSpaceForCommandsNVX(VkCommandBuffer commandBuffer,
- const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateIndirectCommandsLayoutNVX(VkDevice device,
- const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout);
-
-VKAPI_ATTR void VKAPI_CALL DestroyIndirectCommandsLayoutNVX(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateObjectTableNVX(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable);
-
-VKAPI_ATTR void VKAPI_CALL DestroyObjectTableNVX(VkDevice device, VkObjectTableNVX objectTable,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL RegisterObjectsNVX(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount,
- const VkObjectTableEntryNVX* const* ppObjectTableEntries,
- const uint32_t* pObjectIndices);
-
-VKAPI_ATTR VkResult VKAPI_CALL UnregisterObjectsNVX(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount,
- const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceGeneratedCommandsPropertiesNVX(VkPhysicalDevice physicalDevice,
- VkDeviceGeneratedCommandsFeaturesNVX* pFeatures,
- VkDeviceGeneratedCommandsLimitsNVX* pLimits);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetViewportWScalingNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewportWScalingNV* pViewportWScalings);
-
-VKAPI_ATTR VkResult VKAPI_CALL ReleaseDisplayEXT(VkPhysicalDevice physicalDevice, VkDisplayKHR display);
-
-#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
-
-VKAPI_ATTR VkResult VKAPI_CALL AcquireXlibDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetRandROutputDisplayEXT(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput,
- VkDisplayKHR* pDisplay);
-#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilities2EXT* pSurfaceCapabilities);
-
-VKAPI_ATTR VkResult VKAPI_CALL DisplayPowerControlEXT(VkDevice device, VkDisplayKHR display,
- const VkDisplayPowerInfoEXT* pDisplayPowerInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL RegisterDeviceEventEXT(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo,
- const VkAllocationCallbacks* pAllocator, VkFence* pFence);
-
-VKAPI_ATTR VkResult VKAPI_CALL RegisterDisplayEventEXT(VkDevice device, VkDisplayKHR display,
- const VkDisplayEventInfoEXT* pDisplayEventInfo,
- const VkAllocationCallbacks* pAllocator, VkFence* pFence);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainCounterEXT(VkDevice device, VkSwapchainKHR swapchain,
- VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetRefreshCycleDurationGOOGLE(VkDevice device, VkSwapchainKHR swapchain,
- VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPastPresentationTimingGOOGLE(VkDevice device, VkSwapchainKHR swapchain,
- uint32_t* pPresentationTimingCount,
- VkPastPresentationTimingGOOGLE* pPresentationTimings);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
- uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
-
-VKAPI_ATTR void VKAPI_CALL SetHdrMetadataEXT(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains,
- const VkHdrMetadataEXT* pMetadata);
-
-#ifdef VK_USE_PLATFORM_IOS_MVK
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-#endif // VK_USE_PLATFORM_IOS_MVK
-
-#ifdef VK_USE_PLATFORM_MACOS_MVK
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-#endif // VK_USE_PLATFORM_MACOS_MVK
-
-VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo);
-
-VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
-
-VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue);
-
-VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
-
-VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer);
-
-VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
- const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDebugUtilsMessengerEXT* pMessenger);
-
-VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
- VkDebugUtilsMessageTypeFlagsEXT messageTypes,
- const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData);
-
-#ifdef VK_USE_PLATFORM_ANDROID_KHR
-
-VKAPI_ATTR VkResult VKAPI_CALL GetAndroidHardwareBufferPropertiesANDROID(VkDevice device, const struct AHardwareBuffer* buffer,
- VkAndroidHardwareBufferPropertiesANDROID* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryAndroidHardwareBufferANDROID(VkDevice device,
- const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
- struct AHardwareBuffer** pBuffer);
-#endif // VK_USE_PLATFORM_ANDROID_KHR
-
-VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
- const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
-
-VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMultisamplePropertiesEXT(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples,
- VkMultisamplePropertiesEXT* pMultisampleProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetImageDrmFormatModifierPropertiesEXT(VkDevice device, VkImage image,
- VkImageDrmFormatModifierPropertiesEXT* pProperties);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkValidationCacheEXT* pValidationCache);
-
-VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
- const VkValidationCacheEXT* pSrcCaches);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize,
- void* pData);
-
-VKAPI_ATTR void VKAPI_CALL CmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView,
- VkImageLayout imageLayout);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
- uint32_t viewportCount,
- const VkShadingRatePaletteNV* pShadingRatePalettes);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetCoarseSampleOrderNV(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType,
- uint32_t customSampleOrderCount,
- const VkCoarseSampleOrderCustomNV* pCustomSampleOrders);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateAccelerationStructureNV(VkDevice device,
- const VkAccelerationStructureCreateInfoNV* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkAccelerationStructureNV* pAccelerationStructure);
-
-VKAPI_ATTR void VKAPI_CALL DestroyAccelerationStructureNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
- const VkAllocationCallbacks* pAllocator);
-
-VKAPI_ATTR void VKAPI_CALL GetAccelerationStructureMemoryRequirementsNV(
- VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements);
-
-VKAPI_ATTR VkResult VKAPI_CALL BindAccelerationStructureMemoryNV(VkDevice device, uint32_t bindInfoCount,
- const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);
-
-VKAPI_ATTR void VKAPI_CALL CmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer,
- const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData,
- VkDeviceSize instanceOffset, VkBool32 update,
- VkAccelerationStructureNV dst, VkAccelerationStructureNV src,
- VkBuffer scratch, VkDeviceSize scratchOffset);
-
-VKAPI_ATTR void VKAPI_CALL CmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst,
- VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode);
-
-VKAPI_ATTR void VKAPI_CALL CmdTraceRaysNV(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer,
- VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer,
- VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride,
- VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset,
- VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer,
- VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride,
- uint32_t width, uint32_t height, uint32_t depth);
-
-VKAPI_ATTR VkResult VKAPI_CALL CreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
- const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetRayTracingShaderGroupHandlesNV(VkDevice device, VkPipeline pipeline, uint32_t firstGroup,
- uint32_t groupCount, size_t dataSize, void* pData);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetAccelerationStructureHandleNV(VkDevice device, VkAccelerationStructureNV accelerationStructure,
- size_t dataSize, void* pData);
-
-VKAPI_ATTR void VKAPI_CALL CmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer,
- uint32_t accelerationStructureCount,
- const VkAccelerationStructureNV* pAccelerationStructures,
- VkQueryType queryType, VkQueryPool queryPool,
- uint32_t firstQuery);
-
-VKAPI_ATTR VkResult VKAPI_CALL CompileDeferredNV(VkDevice device, VkPipeline pipeline, uint32_t shader);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetMemoryHostPointerPropertiesEXT(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType,
- const void* pHostPointer,
- VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties);
-
-VKAPI_ATTR void VKAPI_CALL CmdWriteBufferMarkerAMD(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
- VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceCalibrateableTimeDomainsEXT(VkPhysicalDevice physicalDevice,
- uint32_t* pTimeDomainCount,
- VkTimeDomainEXT* pTimeDomains);
-
-VKAPI_ATTR VkResult VKAPI_CALL GetCalibratedTimestampsEXT(VkDevice device, uint32_t timestampCount,
- const VkCalibratedTimestampInfoEXT* pTimestampInfos,
- uint64_t* pTimestamps, uint64_t* pMaxDeviation);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset,
- uint32_t maxDrawCount, uint32_t stride);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
- uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);
-
-VKAPI_ATTR void VKAPI_CALL CmdSetCheckpointNV(VkCommandBuffer commandBuffer, const void* pCheckpointMarker);
-
-VKAPI_ATTR void VKAPI_CALL GetQueueCheckpointDataNV(VkQueue queue, uint32_t* pCheckpointDataCount,
- VkCheckpointDataNV* pCheckpointData);
-
-VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char* funcName);
-
-VKAPI_ATTR VkDeviceAddress VKAPI_CALL GetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT* pInfo);
-
-#ifdef VK_USE_PLATFORM_FUCHSIA
-VKAPI_ATTR VkResult VKAPI_CALL CreateImagePipeSurfaceFUCHSIA(VkInstance instance,
- const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
-#endif // VK_USE_PLATFORM_FUCHSIA
-
-using std::vector;
-SURFACE_STATE* GetSurfaceState(instance_layer_data* instance_data, VkSurfaceKHR surface);
-PHYSICAL_DEVICE_STATE* GetPhysicalDeviceState(instance_layer_data* instance_data, VkPhysicalDevice phys);
-PHYSICAL_DEVICE_STATE* GetPhysicalDeviceState(const layer_data* device_data);
-PHYSICAL_DEVICE_STATE* GetPhysicalDeviceState(layer_data* device_data);
-
-VkResult CoreLayerCreateValidationCacheEXT(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache);
-
-void CoreLayerDestroyValidationCacheEXT(VkDevice device, VkValidationCacheEXT validationCache,
- const VkAllocationCallbacks* pAllocator);
-
-VkResult CoreLayerMergeValidationCachesEXT(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount,
- const VkValidationCacheEXT* pSrcCaches);
-
-VkResult CoreLayerGetValidationCacheDataEXT(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData);
-
-bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state);
-void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator,
- VkPipeline* pPipelines, void* cgpl_state);
-void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkGraphicsPipelineCreateInfo* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result,
- void* cgpl_state);
-bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator,
- VkPipeline* pPipelines, void* pipe_state);
-void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator,
- VkPipeline* pPipelines, VkResult result, void* pipe_state);
-bool PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
-void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout, void* cpl_state);
-void PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
+    // GPU Validation Functions
+ void GpuPreCallRecordCreateDevice(VkPhysicalDevice gpu, std::unique_ptr<safe_VkDeviceCreateInfo>& modified_create_info,
+ VkPhysicalDeviceFeatures* supported_features);
+ void GpuPostCallRecordCreateDevice(layer_data* dev_data);
+ void GpuPreCallRecordDestroyDevice(layer_data* dev_data);
+ void GpuPreCallRecordFreeCommandBuffers(layer_data* dev_data, uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+ bool GpuPreCallCreateShaderModule(layer_data* dev_data, const VkShaderModuleCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule,
+ uint32_t* unique_shader_id, VkShaderModuleCreateInfo* instrumented_create_info,
+ std::vector<unsigned int>* instrumented_pgm);
+ bool GpuPreCallCreatePipelineLayout(layer_data* device_data, const VkPipelineLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout,
- VkResult result);
-bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
- VkDescriptorSet* pDescriptorSets, void* ads_state);
-void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
- VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state);
-bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state);
-void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
- const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+ std::vector<VkDescriptorSetLayout>* new_layouts,
+ VkPipelineLayoutCreateInfo* modified_create_info);
+ void GpuPostCallCreatePipelineLayout(layer_data* device_data, VkResult result);
+ void GpuPostCallQueueSubmit(layer_data* dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits,
+ VkFence fence);
+ void GpuPreCallValidateCmdWaitEvents(layer_data* dev_data, VkPipelineStageFlags sourceStageMask);
+ std::vector<safe_VkGraphicsPipelineCreateInfo> GpuPreCallRecordCreateGraphicsPipelines(
+ layer_data* dev_data, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, std::vector<std::unique_ptr<PIPELINE_STATE>>& pipe_state);
+ void GpuPostCallRecordCreateGraphicsPipelines(layer_data* dev_data, const uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+ void GpuPreCallRecordDestroyPipeline(layer_data* dev_data, const VkPipeline pipeline);
+ void GpuAllocateValidationResources(layer_data* dev_data, const VkCommandBuffer cmd_buffer, VkPipelineBindPoint bind_point);
+ void AnalyzeAndReportError(const layer_data* dev_data, GLOBAL_CB_NODE* cb_node, VkQueue queue, uint32_t draw_index,
+ uint32_t* const debug_output_buffer);
+ void ProcessInstrumentationBuffer(const layer_data* dev_data, VkQueue queue, GLOBAL_CB_NODE* cb_node);
+ void SubmitBarrier(layer_data* dev_data, VkQueue queue);
+ bool GpuInstrumentShader(layer_data* dev_data, const VkShaderModuleCreateInfo* pCreateInfo, std::vector<unsigned int>& new_pgm,
+ uint32_t* unique_shader_id);
+ void ReportSetupProblem(const layer_data* dev_data, VkDebugReportObjectTypeEXT object_type, uint64_t object_handle,
+ const char* const specific_message);
+
+ // Buffer Validation Functions
+ template <class OBJECT, class LAYOUT>
+ void SetLayout(layer_data* device_data, OBJECT* pObject, VkImage image, VkImageSubresource range, const LAYOUT& layout);
+ template <class OBJECT, class LAYOUT>
+ void SetLayout(layer_data* device_data, OBJECT* pObject, ImageSubresourcePair imgpair, const LAYOUT& layout,
+ VkImageAspectFlags aspectMask);
+ // Remove the pending QFO release records from the global set
+    // Note that the type of the handle argument is constrained to match the Barrier type
+    // The defaulted BarrierRecord argument allows us to declare the type once, but is not intended to be specified by the caller
+ template <typename Barrier, typename BarrierRecord = QFOTransferBarrier<Barrier>>
+ void EraseQFOReleaseBarriers(layer_data* device_data, const typename BarrierRecord::HandleType& handle) {
+ GlobalQFOTransferBarrierMap<Barrier>& global_release_barriers =
+ GetGlobalQFOReleaseBarrierMap(device_data, typename BarrierRecord::Tag());
+ global_release_barriers.erase(handle);
+ }
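+    // Illustrative usage (hypothetical caller, assuming VkBuffer is the HandleType for
+    // VkBufferMemoryBarrier records): a buffer-teardown path could call
+    //     EraseQFOReleaseBarriers<VkBufferMemoryBarrier>(device_data, buffer);
+    // leaving BarrierRecord defaulted so the matching global release-barrier map is selected.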
+ bool ValidateCopyImageTransferGranularityRequirements(layer_data* device_data, const GLOBAL_CB_NODE* cb_node,
+ const IMAGE_STATE* src_img, const IMAGE_STATE* dst_img,
+ const VkImageCopy* region, const uint32_t i, const char* function);
+ bool ValidateIdleBuffer(layer_data* device_data, VkBuffer buffer);
+ bool ValidateUsageFlags(const layer_data* device_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
+ VulkanObjectType obj_type, const char* msgCode, char const* func_name, char const* usage_str);
+ bool ValidateImageSubresourceRange(const layer_data* device_data, const uint32_t image_mip_count,
+ const uint32_t image_layer_count, const VkImageSubresourceRange& subresourceRange,
+ const char* cmd_name, const char* param_name, const char* image_layer_count_var_name,
+ const uint64_t image_handle, SubresourceRangeErrorCodes errorCodes);
+ void SetImageLayout(layer_data* device_data, GLOBAL_CB_NODE* cb_node, const IMAGE_STATE* image_state,
+ VkImageSubresourceRange image_subresource_range, const VkImageLayout& layout);
+ void SetImageLayout(layer_data* device_data, GLOBAL_CB_NODE* cb_node, const IMAGE_STATE* image_state,
+ VkImageSubresourceLayers image_subresource_layers, const VkImageLayout& layout);
+ bool ValidateRenderPassLayoutAgainstFramebufferImageUsage(layer_data* device_data, RenderPassCreateVersion rp_version,
+ VkImageLayout layout, VkImage image, VkImageView image_view,
+ VkFramebuffer framebuffer, VkRenderPass renderpass,
+ uint32_t attachment_index, const char* variable_name);
+ bool ValidateBufferImageCopyData(const debug_report_data* report_data, uint32_t regionCount, const VkBufferImageCopy* pRegions,
+ IMAGE_STATE* image_state, const char* function);
+ bool ValidateBufferViewRange(const layer_data* device_data, const BUFFER_STATE* buffer_state,
+ const VkBufferViewCreateInfo* pCreateInfo, const VkPhysicalDeviceLimits* device_limits);
+ bool ValidateBufferViewBuffer(const layer_data* device_data, const BUFFER_STATE* buffer_state,
+ const VkBufferViewCreateInfo* pCreateInfo);
+
+ bool PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkImage* pImage);
+
+ void PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkImage* pImage, VkResult result);
+
+ void PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+
+ bool PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+
+ bool ValidateImageAttributes(layer_data* device_data, IMAGE_STATE* image_state, VkImageSubresourceRange range);
+
+ bool ValidateClearAttachmentExtent(layer_data* device_data, VkCommandBuffer command_buffer, uint32_t attachment_index,
+ FRAMEBUFFER_STATE* framebuffer, uint32_t fb_attachment, const VkRect2D& render_area,
+ uint32_t rect_count, const VkClearRect* clear_rects);
+ bool ValidateImageCopyData(const layer_data* device_data, const debug_report_data* report_data, const uint32_t regionCount,
+ const VkImageCopy* ic_regions, const IMAGE_STATE* src_state, const IMAGE_STATE* dst_state);
+
+ bool VerifyClearImageLayout(layer_data* device_data, GLOBAL_CB_NODE* cb_node, IMAGE_STATE* image_state,
+ VkImageSubresourceRange range, VkImageLayout dest_image_layout, const char* func_name);
+
+ bool VerifyImageLayout(layer_data const* device_data, GLOBAL_CB_NODE const* cb_node, IMAGE_STATE* image_state,
+ VkImageSubresourceLayers subLayers, VkImageLayout explicit_layout, VkImageLayout optimal_layout,
+ const char* caller, const char* layout_invalid_msg_code, const char* layout_mismatch_msg_code,
+ bool* error);
+
+ bool CheckItgExtent(layer_data* device_data, const GLOBAL_CB_NODE* cb_node, const VkExtent3D* extent, const VkOffset3D* offset,
+ const VkExtent3D* granularity, const VkExtent3D* subresource_extent, const VkImageType image_type,
+ const uint32_t i, const char* function, const char* member, const char* vuid);
+
+ bool CheckItgOffset(layer_data* device_data, const GLOBAL_CB_NODE* cb_node, const VkOffset3D* offset,
+ const VkExtent3D* granularity, const uint32_t i, const char* function, const char* member,
+ const char* vuid);
+ VkExtent3D GetScaledItg(layer_data* device_data, const GLOBAL_CB_NODE* cb_node, const IMAGE_STATE* img);
+ bool CopyImageMultiplaneValidation(const layer_data* dev_data, VkCommandBuffer command_buffer,
+ const IMAGE_STATE* src_image_state, const IMAGE_STATE* dst_image_state,
+ const VkImageCopy region);
+
+ void RecordClearImageLayout(layer_data* dev_data, GLOBAL_CB_NODE* cb_node, VkImage image, VkImageSubresourceRange range,
+ VkImageLayout dest_image_layout);
+
+ bool PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearColorValue* pColor, uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+ void PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearColorValue* pColor, uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+ bool PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+ void PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
+ const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges);
+
+ bool FindLayoutVerifyNode(layer_data const* device_data, GLOBAL_CB_NODE const* pCB, ImageSubresourcePair imgpair,
+ IMAGE_CMD_BUF_LAYOUT_NODE& node, const VkImageAspectFlags aspectMask);
+
+ bool FindLayoutVerifyLayout(layer_data const* device_data, ImageSubresourcePair imgpair, VkImageLayout& layout,
+ const VkImageAspectFlags aspectMask);
+
+ bool FindCmdBufLayout(layer_data const* device_data, GLOBAL_CB_NODE const* pCB, VkImage image, VkImageSubresource range,
+ IMAGE_CMD_BUF_LAYOUT_NODE& node);
+
+ bool FindGlobalLayout(layer_data* device_data, ImageSubresourcePair imgpair, VkImageLayout& layout);
+
+ bool FindLayouts(layer_data* device_data, VkImage image, std::vector<VkImageLayout>& layouts);
+
+ bool FindLayout(layer_data* device_data, const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE>& imageLayoutMap,
+ ImageSubresourcePair imgpair, VkImageLayout& layout);
+
+ bool FindLayout(const std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE>& imageLayoutMap, ImageSubresourcePair imgpair,
+ VkImageLayout& layout, const VkImageAspectFlags aspectMask);
+
+ void SetGlobalLayout(layer_data* device_data, ImageSubresourcePair imgpair, const VkImageLayout& layout);
+
+ void SetImageViewLayout(layer_data* device_data, GLOBAL_CB_NODE* pCB, VkImageView imageView, const VkImageLayout& layout);
+
+ void SetImageViewLayout(layer_data* device_data, GLOBAL_CB_NODE* cb_node, IMAGE_VIEW_STATE* view_state,
+ const VkImageLayout& layout);
+
+ bool VerifyFramebufferAndRenderPassLayouts(layer_data* dev_data, RenderPassCreateVersion rp_version, GLOBAL_CB_NODE* pCB,
+ const VkRenderPassBeginInfo* pRenderPassBegin,
+ const FRAMEBUFFER_STATE* framebuffer_state);
+
+ void TransitionAttachmentRefLayout(layer_data* dev_data, GLOBAL_CB_NODE* pCB, FRAMEBUFFER_STATE* pFramebuffer,
+ const safe_VkAttachmentReference2KHR& ref);
+
+ void TransitionSubpassLayouts(layer_data*, GLOBAL_CB_NODE*, const RENDER_PASS_STATE*, const int, FRAMEBUFFER_STATE*);
+
+ void TransitionBeginRenderPassLayouts(layer_data*, GLOBAL_CB_NODE*, const RENDER_PASS_STATE*, FRAMEBUFFER_STATE*);
+
+ bool ValidateImageAspectLayout(layer_data* device_data, GLOBAL_CB_NODE const* pCB, const VkImageMemoryBarrier* mem_barrier,
+ uint32_t level, uint32_t layer, VkImageAspectFlags aspect);
+
+ void TransitionImageAspectLayout(layer_data* device_data, GLOBAL_CB_NODE* pCB, const VkImageMemoryBarrier* mem_barrier,
+ uint32_t level, uint32_t layer, VkImageAspectFlags aspect_mask, VkImageAspectFlags aspect);
+
+ bool ValidateBarrierLayoutToImageUsage(layer_data* device_data, const VkImageMemoryBarrier* img_barrier, bool new_not_old,
+ VkImageUsageFlags usage, const char* func_name);
+
+ bool ValidateBarriersToImages(layer_data* device_data, GLOBAL_CB_NODE const* cb_state, uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers, const char* func_name);
+
+ void RecordQueuedQFOTransfers(layer_data* dev_data, GLOBAL_CB_NODE* pCB);
+ void EraseQFOImageRelaseBarriers(layer_data* device_data, const VkImage& image);
+
+ void TransitionImageLayouts(layer_data* device_data, GLOBAL_CB_NODE* cb_state, uint32_t memBarrierCount,
+ const VkImageMemoryBarrier* pImgMemBarriers);
+
+ void TransitionFinalSubpassLayouts(layer_data* dev_data, GLOBAL_CB_NODE* pCB, const VkRenderPassBeginInfo* pRenderPassBegin,
+ FRAMEBUFFER_STATE* framebuffer_state);
+
+ bool PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageCopy* pRegions);
+
+ bool PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
+ const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+
+ bool PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageResolve* pRegions);
+
+ void PreCallRecordCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageResolve* pRegions);
+
+ bool PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
+ const VkImageBlit* pRegions, VkFilter filter);
+
+ void PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions,
+ VkFilter filter);
+
+ bool ValidateCmdBufImageLayouts(layer_data* device_data, GLOBAL_CB_NODE* pCB,
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const& globalImageLayoutMap,
+ std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE>& overlayLayoutMap);
+
+ void UpdateCmdBufImageLayouts(layer_data* device_data, GLOBAL_CB_NODE* pCB);
+
+ bool ValidateMaskBitsFromLayouts(layer_data* device_data, VkCommandBuffer cmdBuffer, const VkAccessFlags& accessMask,
+ const VkImageLayout& layout, const char* type);
+
+ bool ValidateLayoutVsAttachmentDescription(const debug_report_data* report_data, RenderPassCreateVersion rp_version,
+ const VkImageLayout first_layout, const uint32_t attachment,
+ const VkAttachmentDescription2KHR& attachment_description);
+
+ bool ValidateLayouts(layer_data* dev_data, RenderPassCreateVersion rp_version, VkDevice device,
+ const VkRenderPassCreateInfo2KHR* pCreateInfo);
+
+ bool ValidateMapImageLayouts(layer_data* dev_data, VkDevice device, DEVICE_MEM_INFO const* mem_info, VkDeviceSize offset,
+ VkDeviceSize end_offset);
+
+ bool ValidateImageUsageFlags(layer_data* dev_data, IMAGE_STATE const* image_state, VkFlags desired, bool strict,
+ const char* msgCode, char const* func_name, char const* usage_string);
+
+ bool ValidateImageFormatFeatureFlags(layer_data* dev_data, IMAGE_STATE const* image_state, VkFormatFeatureFlags desired,
+ char const* func_name, const char* linear_vuid, const char* optimal_vuid);
+
+ bool ValidateImageSubresourceLayers(layer_data* dev_data, const GLOBAL_CB_NODE* cb_node,
+ const VkImageSubresourceLayers* subresource_layers, char const* func_name,
+ char const* member, uint32_t i);
+
+ bool ValidateBufferUsageFlags(const layer_data* dev_data, BUFFER_STATE const* buffer_state, VkFlags desired, bool strict,
+ const char* msgCode, char const* func_name, char const* usage_string);
+
+ bool PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+
+ void PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkBuffer* pBuffer, VkResult result);
+
+ bool PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+
+ void PostCallRecordCreateBufferView(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkBufferView* pView, VkResult result);
+
+ bool ValidateImageAspectMask(const layer_data* device_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
+ const char* func_name, const char* vuid = "VUID-VkImageSubresource-aspectMask-parameter");
+
+ bool ValidateCreateImageViewSubresourceRange(const layer_data* device_data, const IMAGE_STATE* image_state,
+ bool is_imageview_2d_type, const VkImageSubresourceRange& subresourceRange);
+
+ bool ValidateCmdClearColorSubresourceRange(const layer_data* device_data, const IMAGE_STATE* image_state,
+ const VkImageSubresourceRange& subresourceRange, const char* param_name);
+
+ bool ValidateCmdClearDepthSubresourceRange(const layer_data* device_data, const IMAGE_STATE* image_state,
+ const VkImageSubresourceRange& subresourceRange, const char* param_name);
+
+ bool ValidateImageBarrierSubresourceRange(const layer_data* device_data, const IMAGE_STATE* image_state,
+ const VkImageSubresourceRange& subresourceRange, const char* cmd_name,
+ const char* param_name);
+
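+    // Image view creation entry points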
+ bool PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+
+ void PostCallRecordCreateImageView(VkDevice device, const VkImageViewCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkImageView* pView, VkResult result);
+
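+    // Copy-region helpers and buffer/image copy, fill, and destroy entry points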
+ bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data* device_data, const GLOBAL_CB_NODE* cb_node,
+ const IMAGE_STATE* img, const VkBufferImageCopy* region,
+ const uint32_t i, const char* function, const char* vuid);
+
+ bool ValidateImageMipLevel(layer_data* device_data, const GLOBAL_CB_NODE* cb_node, const IMAGE_STATE* img, uint32_t mip_level,
+ const uint32_t i, const char* function, const char* member, const char* vuid);
+
+ bool ValidateImageArrayLayerRange(layer_data* device_data, const GLOBAL_CB_NODE* cb_node, const IMAGE_STATE* img,
+ const uint32_t base_layer, const uint32_t layer_count, const uint32_t i, const char* function,
+ const char* member, const char* vuid);
+
+ void PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+
+ bool PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+ void PreCallRecordCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount,
+ const VkBufferCopy* pRegions);
+
+ bool PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+
+ void PreCallRecordDestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+
+ bool PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+
+ void PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+
+ bool PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+
+ void PreCallRecordDestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+
+ bool PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
+ uint32_t data);
+
+ void PreCallRecordCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size,
+ uint32_t data);
+
+ bool PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+
+ void PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+
+ bool PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+
+ void PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
+ VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+
+ bool PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource* pSubresource,
+ VkSubresourceLayout* pLayout);
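+    // Android-specific create/record helpers and queue family validation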
+ bool ValidateCreateImageANDROID(layer_data* device_data, const debug_report_data* report_data,
+ const VkImageCreateInfo* create_info);
+ void RecordCreateImageANDROID(const VkImageCreateInfo* create_info, IMAGE_STATE* is_node);
+ bool ValidateCreateImageViewANDROID(layer_data* device_data, const VkImageViewCreateInfo* create_info);
+ bool ValidateGetImageSubresourceLayoutANDROID(layer_data* device_data, const VkImage image);
+ bool ValidateQueueFamilies(layer_data* device_data, uint32_t queue_family_count, const uint32_t* queue_families,
+ const char* cmd_name, const char* array_parameter_name, const char* unique_error_code,
+ const char* valid_error_code, bool optional);
+ bool ValidateAllocateMemoryANDROID(layer_data* dev_data, const VkMemoryAllocateInfo* alloc_info);
+ bool ValidateGetImageMemoryRequirements2ANDROID(layer_data* dev_data, const VkImage image);
+ bool ValidateCreateSamplerYcbcrConversionANDROID(const layer_data* dev_data,
+ const VkSamplerYcbcrConversionCreateInfo* create_info);
+ void RecordCreateSamplerYcbcrConversionANDROID(layer_data* dev_data, const VkSamplerYcbcrConversionCreateInfo* create_info,
+ VkSamplerYcbcrConversion ycbcr_conversion);
+ void RecordDestroySamplerYcbcrConversionANDROID(layer_data* dev_data, VkSamplerYcbcrConversion ycbcr_conversion);
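+    // Pipeline, pipeline layout, descriptor set, and ray tracing pipeline creation entry points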
+ bool PreCallValidateCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state);
+ void PreCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* cgpl_state);
+ void PostCallRecordCreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkGraphicsPipelineCreateInfo* pCreateInfos,
const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result,
- void* pipe_state);
-bool ValidateQueueFamilies(layer_data* device_data, uint32_t queue_family_count, const uint32_t* queue_families,
- const char* cmd_name, const char* array_parameter_name, const char* unique_error_code,
- const char* valid_error_code, bool optional);
-void PostCallRecordCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkInstance* pInstance, VkResult result);
-bool PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
-void PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkDevice* pDevice, std::unique_ptr<safe_VkDeviceCreateInfo>& modified_create_info);
-void PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDevice* pDevice, VkResult result);
-bool PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
- VkDeviceSize dataSize, const void* pData);
-void PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize,
- const void* pData);
-void PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkFence* pFence, VkResult result);
-bool PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
-void PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
-void PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
-bool PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSamplerYcbcrConversion* pYcbcrConversion);
-void PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion,
- VkResult result);
-bool PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ void* cgpl_state);
+ bool PreCallValidateCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, void* pipe_state);
+ void PostCallRecordCreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkComputePipelineCreateInfo* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result,
+ void* pipe_state);
+ bool PreCallValidateCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+ void PreCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout,
+ void* cpl_state);
+ void PostCallRecordCreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout,
+ VkResult result);
+ bool PreCallValidateAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
+ VkDescriptorSet* pDescriptorSets, void* ads_state);
+ void PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo,
+ VkDescriptorSet* pDescriptorSets, VkResult result, void* ads_state);
+ bool PreCallValidateCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines,
+ void* pipe_state);
+ void PostCallRecordCreateRayTracingPipelinesNV(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
+ const VkRayTracingPipelineCreateInfoNV* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines, VkResult result,
+ void* pipe_state);
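+    // Instance and device creation entry points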
+ void PostCallRecordCreateInstance(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkInstance* pInstance, VkResult result);
+ bool PreCallValidateCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+ void PreCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDevice* pDevice,
+ std::unique_ptr<safe_VkDeviceCreateInfo>& modified_create_info);
+ void PostCallRecordCreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDevice* pDevice, VkResult result);
+ bool PreCallValidateCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize dataSize, const void* pData);
+ void PostCallRecordCmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize dataSize, const void* pData);
+ void PostCallRecordCreateFence(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkFence* pFence, VkResult result);
+ bool PreCallValidateGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+ void PostCallRecordGetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+ void PostCallRecordGetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
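+    // Sampler Ycbcr conversion entry points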
+ bool PreCallValidateCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSamplerYcbcrConversion* pYcbcrConversion);
+ void PostCallRecordCreateSamplerYcbcrConversion(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
- VkSamplerYcbcrConversion* pYcbcrConversion);
-void PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSamplerYcbcrConversion* pYcbcrConversion, VkResult result);
-bool PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
-void PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
-void PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence, VkResult result);
-bool PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
- const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
-void PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
- const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory, VkResult result);
-bool PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
-void PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout,
- VkResult result);
-bool PreCallValidateGetFenceStatus(VkDevice device, VkFence fence);
-void PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result);
-bool PreCallValidateQueueWaitIdle(VkQueue queue);
-void PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result);
-bool PreCallValidateDeviceWaitIdle(VkDevice device);
-void PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result);
-bool PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
-void PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
- size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags,
- VkResult result);
-bool PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos);
-void PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos,
+ VkSamplerYcbcrConversion* pYcbcrConversion, VkResult result);
+ bool PreCallValidateCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSamplerYcbcrConversion* pYcbcrConversion);
+ void PostCallRecordCreateSamplerYcbcrConversionKHR(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSamplerYcbcrConversion* pYcbcrConversion, VkResult result);
+ bool PreCallValidateCmdDebugMarkerBeginEXT(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo);
+ void PreCallRecordDestroyDevice(VkDevice device, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+ void PostCallRecordQueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence,
+ VkResult result);
+ bool PreCallValidateAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+ void PostCallRecordAllocateMemory(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory, VkResult result);
+ bool PreCallValidateFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordFreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll,
+ uint64_t timeout);
+ void PostCallRecordWaitForFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll,
+ uint64_t timeout, VkResult result);
+ bool PreCallValidateGetFenceStatus(VkDevice device, VkFence fence);
+ void PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result);
+ bool PreCallValidateQueueWaitIdle(VkQueue queue);
+ void PostCallRecordQueueWaitIdle(VkQueue queue, VkResult result);
+ bool PreCallValidateDeviceWaitIdle(VkDevice device);
+ void PostCallRecordDeviceWaitIdle(VkDevice device, VkResult result);
+ bool PreCallValidateDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+ void PostCallRecordGetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
+ size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags,
+ VkResult result);
+ bool PreCallValidateBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos);
+ void PostCallRecordBindBufferMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos,
+ VkResult result);
+ bool PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos);
+ void PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos,
+ VkResult result);
+ bool PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset);
+ void PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
VkResult result);
-bool PreCallValidateBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos);
-void PostCallRecordBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfoKHR* pBindInfos,
- VkResult result);
-bool PreCallValidateBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset);
-void PostCallRecordBindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset,
- VkResult result);
-void PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
-void PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo,
- VkMemoryRequirements2KHR* pMemoryRequirements);
-void PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo,
- VkMemoryRequirements2KHR* pMemoryRequirements);
-bool PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-bool PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
+ void PostCallRecordGetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+ void PostCallRecordGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo,
+ VkMemoryRequirements2KHR* pMemoryRequirements);
+ void PostCallRecordGetBufferMemoryRequirements2KHR(VkDevice device, const VkBufferMemoryRequirementsInfo2KHR* pInfo,
+ VkMemoryRequirements2KHR* pMemoryRequirements);
+ bool PreCallValidateGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+ bool PreCallValidateGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+ void PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+ void PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
VkMemoryRequirements2* pMemoryRequirements);
-void PostCallRecordGetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
-void PostCallRecordGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-void PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements);
-void PostCallRecordGetImageSparseMemoryRequirements2(IMAGE_STATE* image_state, uint32_t req_count,
- VkSparseImageMemoryRequirements2KHR* reqs);
-void PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
-void PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
-void PostCallRecordGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
- uint32_t* pSparseMemoryRequirementCount,
- VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
-bool PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties);
-bool PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties);
-void PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
- const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
- const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers);
-void PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
- const VkCommandBuffer* pCommandBuffers);
-bool PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
-void PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool, VkResult result);
-bool PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
-void PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool, VkResult result);
-bool PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
-void PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result);
-bool PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
-void PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkResult result);
-bool PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
-void PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkSampler* pSampler, VkResult result);
-bool PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
-void PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout,
- VkResult result);
-void PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool,
- VkResult result);
-bool PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
-void PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags,
- VkResult result);
-bool PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
- const VkDescriptorSet* pDescriptorSets);
-void PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
- const VkDescriptorSet* pDescriptorSets);
-bool PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet* pDescriptorCopies);
-void PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount,
- const VkCopyDescriptorSet* pDescriptorCopies);
-void PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pCreateInfo,
- VkCommandBuffer* pCommandBuffer, VkResult result);
-bool PreCallValidateBeginCommandBuffer(const VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
-void PreCallRecordBeginCommandBuffer(const VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
-bool PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer);
-void PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result);
-bool PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
-void PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result);
-bool PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
-void PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
-bool PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewport* pViewports);
-void PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkViewport* pViewports);
-bool PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
- const VkRect2D* pScissors);
-void PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
- const VkRect2D* pScissors);
-bool PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
- uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);
-void PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
- uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);
-bool PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);
-void PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);
-bool PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
- uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes);
-void PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
- const VkShadingRatePaletteNV* pShadingRatePalettes);
-bool PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth);
-void PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth);
-bool PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
- float depthBiasSlopeFactor);
-void PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
- float depthBiasSlopeFactor);
-bool PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]);
-void PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]);
-bool PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
-void PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
-bool PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
-void PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
-bool PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
-void PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
-bool PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
-void PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
-bool PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
- const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
- const uint32_t* pDynamicOffsets);
-void PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
- const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
- const uint32_t* pDynamicOffsets);
-bool PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites);
-void PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
- VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
- const VkWriteDescriptorSet* pDescriptorWrites);
-bool PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
-void PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
-bool PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
- const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
-void PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
- const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
-bool PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance);
-void PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance);
-void PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
- uint32_t firstInstance);
-bool PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance);
-void PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance);
-void PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex,
- int32_t vertexOffset, uint32_t firstInstance);
-bool PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-void PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-void PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-bool PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-bool PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
-void PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
-void PostCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
-bool PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
-void PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
-void PostCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
-bool PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-void PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-void PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
- uint32_t stride);
-bool PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-void PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-bool PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-void PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
-bool PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-void PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-void PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
- VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-bool PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
- uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-void PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
+ void PostCallRecordGetImageMemoryRequirements2KHR(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements);
+ void PostCallRecordGetImageSparseMemoryRequirements(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+ void PostCallRecordGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
+ void PostCallRecordGetImageSparseMemoryRequirements2KHR(VkDevice device, const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
+ uint32_t* pSparseMemoryRequirementCount,
+ VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements);
+ bool PreCallValidateGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties);
+ bool PreCallValidateGetPhysicalDeviceImageFormatProperties2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties);
+ void PreCallRecordDestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout,
+ const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout,
+ const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
+ const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+ void PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers);
+ bool PreCallValidateCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+ void PostCallRecordCreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool, VkResult result);
+ bool PreCallValidateCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+ void PostCallRecordCreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool, VkResult result);
+ bool PreCallValidateDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+ void PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result);
+ bool PreCallValidateResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+ void PostCallRecordResetFences(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkResult result);
+ bool PreCallValidateDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+ void PostCallRecordCreateSampler(VkDevice device, const VkSamplerCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSampler* pSampler, VkResult result);
+ bool PreCallValidateCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+ void PostCallRecordCreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout,
+ VkResult result);
+ void PostCallRecordCreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool,
+ VkResult result);
+ bool PreCallValidateResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+ void PostCallRecordResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags,
+ VkResult result);
+ bool PreCallValidateFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
+ const VkDescriptorSet* pDescriptorSets);
+ void PreCallRecordFreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
+ const VkDescriptorSet* pDescriptorSets);
+ bool PreCallValidateUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+ void PreCallRecordUpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount,
+ const VkCopyDescriptorSet* pDescriptorCopies);
+ void PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo* pCreateInfo,
+ VkCommandBuffer* pCommandBuffer, VkResult result);
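+    // Command buffer recording, dynamic state, and resource binding entry points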
+ bool PreCallValidateBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+ void PreCallRecordBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+ bool PreCallValidateEndCommandBuffer(VkCommandBuffer commandBuffer);
+ void PostCallRecordEndCommandBuffer(VkCommandBuffer commandBuffer, VkResult result);
+ bool PreCallValidateResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+ void PostCallRecordResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags, VkResult result);
+ bool PreCallValidateCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+ void PreCallRecordCmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+ bool PreCallValidateCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
+ const VkViewport* pViewports);
+ void PreCallRecordCmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount,
+ const VkViewport* pViewports);
+ bool PreCallValidateCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
+ const VkRect2D* pScissors);
+ void PreCallRecordCmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount,
+ const VkRect2D* pScissors);
+ bool PreCallValidateCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
+ uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);
+ void PreCallRecordCmdSetExclusiveScissorNV(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor,
+ uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors);
+ bool PreCallValidateCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);
+ void PreCallRecordCmdBindShadingRateImageNV(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout);
+ bool PreCallValidateCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkShadingRatePaletteNV* pShadingRatePalettes);
+ void PreCallRecordCmdSetViewportShadingRatePaletteNV(VkCommandBuffer commandBuffer, uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkShadingRatePaletteNV* pShadingRatePalettes);
+ bool PreCallValidateCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth);
+ void PreCallRecordCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth);
+ bool PreCallValidateCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
+ float depthBiasSlopeFactor);
+ void PreCallRecordCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp,
+ float depthBiasSlopeFactor);
+ bool PreCallValidateCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+ void PreCallRecordCmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+ bool PreCallValidateCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+ void PreCallRecordCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+ bool PreCallValidateCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+ void PreCallRecordCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+ bool PreCallValidateCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+ void PreCallRecordCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+ bool PreCallValidateCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+ void PreCallRecordCmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+ bool PreCallValidateCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
+ const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+ void PreCallRecordCmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount,
+ const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets);
+ bool PreCallValidateCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites);
+ void PreCallRecordCmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites);
+ bool PreCallValidateCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkIndexType indexType);
+ void PreCallRecordCmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkIndexType indexType);
+ bool PreCallValidateCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
+ const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+ void PreCallRecordCmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount,
+ const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
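+    // Draw and dispatch entry points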
+ bool PreCallValidateCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
+ uint32_t firstInstance);
+ void PreCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
+ uint32_t firstInstance);
+ void PostCallRecordCmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex,
+ uint32_t firstInstance);
+ bool PreCallValidateCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+ void PreCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+ void PostCallRecordCmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount,
+ uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+ bool PreCallValidateCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
+ void PreCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
+ void PostCallRecordCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
+ bool PreCallValidateCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ bool PreCallValidateCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
+ void PreCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
+ void PostCallRecordCmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z);
+ bool PreCallValidateCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+ void PreCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+ void PostCallRecordCmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+ bool PreCallValidateCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
+ void PreCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
+ void PostCallRecordCmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count,
+ uint32_t stride);
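+    // Event and pipeline barrier entry points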
+ bool PreCallValidateCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+ void PreCallRecordCmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+ bool PreCallValidateCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+ void PreCallRecordCmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
+ bool PreCallValidateCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+ void PreCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+ void PostCallRecordCmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents,
+ VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
-bool PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
-void PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
-bool PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot);
-void PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot);
-bool PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount);
-void PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount);
-bool PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
- VkQueryResultFlags flags);
-void PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
- uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
- VkQueryResultFlags flags);
-bool PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
- uint32_t offset, uint32_t size, const void* pValues);
-bool PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool,
- uint32_t slot);
-void PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool,
- uint32_t slot);
-bool PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
-void PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer, VkResult result);
-bool PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
-void PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, VkResult result);
-bool PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo,
+ bool PreCallValidateCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+ void PreCallRecordCmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers);
+ bool PreCallValidateCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
+ void PostCallRecordCmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags);
+ bool PreCallValidateCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot);
+ void PostCallRecordCmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot);
+ bool PreCallValidateCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount);
+ void PostCallRecordCmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount);
+ bool PreCallValidateCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset,
+ VkDeviceSize stride, VkQueryResultFlags flags);
+ void PostCallRecordCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery,
+ uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride,
+ VkQueryResultFlags flags);
+ bool PreCallValidateCmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags,
+ uint32_t offset, uint32_t size, const void* pValues);
+ bool PreCallValidateCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool, uint32_t slot);
+ void PostCallRecordCmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool, uint32_t slot);
+ bool PreCallValidateCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+ void PostCallRecordCreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer, VkResult result);
+ bool PreCallValidateCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
-void PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo,
+ void PostCallRecordCreateRenderPass(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, VkResult result);
-bool PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents);
-void PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- VkSubpassContents contents);
-bool PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
-void PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
- const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
-bool PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents);
-void PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents);
-bool PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
- const VkSubpassEndInfoKHR* pSubpassEndInfo);
-void PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
- const VkSubpassEndInfoKHR* pSubpassEndInfo);
-bool PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer);
-void PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer);
-bool PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
-void PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
-bool PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
- const VkCommandBuffer* pCommandBuffers);
-void PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
- const VkCommandBuffer* pCommandBuffers);
-bool PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
- void** ppData);
-void PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
- void** ppData, VkResult result);
-bool PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem);
-void PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem);
-bool PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange* pMemRanges);
-bool PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange* pMemRanges);
-void PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange* pMemRanges,
- VkResult result);
-bool PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize* pCommittedMem);
-bool PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset);
-void PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset, VkResult result);
-bool PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos);
-void PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos,
- VkResult result);
-bool PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos);
-void PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos,
- VkResult result);
-bool PreCallValidateSetEvent(VkDevice device, VkEvent event);
-void PreCallRecordSetEvent(VkDevice device, VkEvent event);
-bool PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
-void PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence,
- VkResult result);
-void PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore, VkResult result);
-bool PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
-void PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo, VkResult result);
-#ifdef VK_USE_PLATFORM_WIN32_KHR
-void PostCallRecordImportSemaphoreWin32HandleKHR(VkDevice device,
- const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo,
- VkResult result);
-bool PreCallValidateImportSemaphoreWin32HandleKHR(VkDevice device,
- const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
-bool PreCallValidateImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
-void PostCallRecordImportFenceWin32HandleKHR(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo,
- VkResult result);
-void PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
- HANDLE* pHandle, VkResult result);
-void PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle,
- VkResult result);
-#endif // VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
-void PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo, VkResult result);
-
-void PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, VkResult result);
-void PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd, VkResult result);
-void PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
- VkEvent* pEvent, VkResult result);
-bool PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
-void PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain, VkResult result);
-void PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
- VkImage* pSwapchainImages);
-void PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
- VkImage* pSwapchainImages, VkResult result);
-bool PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
-void PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo, VkResult result);
-bool PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
- const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator,
- VkSwapchainKHR* pSwapchains);
-void PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos,
- const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains, VkResult result);
-bool PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
- VkFence fence, uint32_t* pImageIndex);
-bool PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex);
-void PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
- VkFence fence, uint32_t* pImageIndex, VkResult result);
-void PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex,
- VkResult result);
-void PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices,
- VkResult result);
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties* pQueueFamilyProperties);
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties* pQueueFamilyProperties);
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
-bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
-void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
- VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
-bool PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
-void PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilitiesKHR* pSurfaceCapabilities, VkResult result);
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
- VkSurfaceCapabilities2KHR* pSurfaceCapabilities, VkResult result);
-void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- VkSurfaceCapabilities2EXT* pSurfaceCapabilities, VkResult result);
-bool PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- VkSurfaceKHR surface, VkBool32* pSupported);
-void PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- VkSurfaceKHR surface, VkBool32* pSupported, VkResult result);
-void PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes,
- VkResult result);
-bool PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
-void PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
- uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats,
- VkResult result);
-void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
- uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats,
- VkResult result);
-void PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
-void PreCallRecordQueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
-void PostCallRecordQueueEndDebugUtilsLabelEXT(VkQueue queue);
-void PreCallRecordQueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
-void PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
-void PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer);
-void PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
-void PostCallRecordCreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger,
- VkResult result);
-void PostCallRecordDestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
- const VkAllocationCallbacks* pAllocator);
-void PostCallRecordCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pMsgCallback,
- VkResult result);
-void PostCallDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
- const VkAllocationCallbacks* pAllocator);
-void PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupPropertiesKHR* pPhysicalDeviceGroupProperties,
- VkResult result);
-void PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
- VkPhysicalDeviceGroupPropertiesKHR* pPhysicalDeviceGroupProperties,
+ bool PreCallValidateGetDeviceMemoryCommitment(VkDevice device, VkDeviceMemory mem, VkDeviceSize* pCommittedMem);
+ bool PreCallValidateCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+ void PostCallRecordCreateRenderPass2KHR(VkDevice device, const VkRenderPassCreateInfo2KHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass, VkResult result);
+ bool PreCallValidateCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+ void PreCallRecordCmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
+ VkSubpassContents contents);
+ bool PreCallValidateCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
+ void PreCallRecordCmdBeginRenderPass2KHR(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin,
+ const VkSubpassBeginInfoKHR* pSubpassBeginInfo);
+ bool PreCallValidateCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+ void PostCallRecordCmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents);
+ bool PreCallValidateCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo);
+ void PostCallRecordCmdNextSubpass2KHR(VkCommandBuffer commandBuffer, const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
+ const VkSubpassEndInfoKHR* pSubpassEndInfo);
+ bool PreCallValidateCmdEndRenderPass(VkCommandBuffer commandBuffer);
+ void PostCallRecordCmdEndRenderPass(VkCommandBuffer commandBuffer);
+ bool PreCallValidateCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
+ void PostCallRecordCmdEndRenderPass2KHR(VkCommandBuffer commandBuffer, const VkSubpassEndInfoKHR* pSubpassEndInfo);
+ bool PreCallValidateCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
+ const VkCommandBuffer* pCommandBuffers);
+ void PreCallRecordCmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount,
+ const VkCommandBuffer* pCommandBuffers);
+ bool PreCallValidateMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
+ void** ppData);
+ void PostCallRecordMapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
+ void** ppData, VkResult result);
+ bool PreCallValidateUnmapMemory(VkDevice device, VkDeviceMemory mem);
+ void PreCallRecordUnmapMemory(VkDevice device, VkDeviceMemory mem);
+ bool PreCallValidateFlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange* pMemRanges);
+ bool PreCallValidateInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
+ const VkMappedMemoryRange* pMemRanges);
+ void PostCallRecordInvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount, const VkMappedMemoryRange* pMemRanges,
VkResult result);
-bool PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
-void PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate, VkResult result);
-bool PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
-void PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate, VkResult result);
-void PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- const VkAllocationCallbacks* pAllocator);
-void PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
-void PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
-bool PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
-void PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
+ bool PreCallValidateBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset);
+ void PostCallRecordBindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset,
+ VkResult result);
+ bool PreCallValidateBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos);
+ void PostCallRecordBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos,
+ VkResult result);
+ bool PreCallValidateBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos);
+ void PostCallRecordBindImageMemory2KHR(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfoKHR* pBindInfos,
+ VkResult result);
+ bool PreCallValidateSetEvent(VkDevice device, VkEvent event);
+ void PreCallRecordSetEvent(VkDevice device, VkEvent event);
+ bool PreCallValidateQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+ void PostCallRecordQueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence,
+ VkResult result);
+ void PostCallRecordCreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore, VkResult result);
+ bool PreCallValidateImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo);
+ void PostCallRecordImportSemaphoreFdKHR(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo,
+ VkResult result);
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+ void PostCallRecordImportSemaphoreWin32HandleKHR(VkDevice device,
+ const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo,
+ VkResult result);
+ bool PreCallValidateImportSemaphoreWin32HandleKHR(VkDevice device,
+ const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
+ bool PreCallValidateImportFenceWin32HandleKHR(VkDevice device,
+ const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
+ void PostCallRecordImportFenceWin32HandleKHR(VkDevice device,
+ const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo,
+ VkResult result);
+ void PostCallRecordGetSemaphoreWin32HandleKHR(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+ HANDLE* pHandle, VkResult result);
+ void PostCallRecordGetFenceWin32HandleKHR(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
+ HANDLE* pHandle, VkResult result);
+#endif // VK_USE_PLATFORM_WIN32_KHR
+ bool PreCallValidateImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo);
+ void PostCallRecordImportFenceFdKHR(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo, VkResult result);
-bool PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- VkPipelineLayout layout, uint32_t set, const void* pData);
-void PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
- VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
- VkPipelineLayout layout, uint32_t set, const void* pData);
-void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkDisplayPlanePropertiesKHR* pProperties, VkResult result);
-void PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
- VkDisplayPlaneProperties2KHR* pProperties, VkResult result);
-bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
- uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
-bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex,
- VkDisplayPlaneCapabilitiesKHR* pCapabilities);
-bool PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
- const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
- VkDisplayPlaneCapabilities2KHR* pCapabilities);
-bool PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer);
-bool PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
- uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
-bool PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
-bool PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-void PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer,
- VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride);
-void PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-bool PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);
-void PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);
-bool PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride);
-void PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- uint32_t drawCount, uint32_t stride);
-bool PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-void PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
- VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
- uint32_t stride);
-void PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks* pAllocator);
-void PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
- const VkAllocationCallbacks* pAllocator);
-bool PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT* pInfo);
-void PostCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceProperties* pPhysicalDeviceProperties);
+ void PostCallRecordGetSemaphoreFdKHR(VkDevice device, const VkSemaphoreGetFdInfoKHR* pGetFdInfo, int* pFd, VkResult result);
+ void PostCallRecordGetFenceFdKHR(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd, VkResult result);
+ void PostCallRecordCreateEvent(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
+ VkEvent* pEvent, VkResult result);
+ bool PreCallValidateCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain);
+ void PostCallRecordCreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain, VkResult result);
+ void PreCallRecordDestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
+ VkImage* pSwapchainImages);
+ void PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount,
+ VkImage* pSwapchainImages, VkResult result);
+ bool PreCallValidateQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo);
+ void PostCallRecordQueuePresentKHR(VkQueue queue, const VkPresentInfoKHR* pPresentInfo, VkResult result);
+ bool PreCallValidateCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains);
+ void PostCallRecordCreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
+ const VkSwapchainCreateInfoKHR* pCreateInfos,
+ const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains,
+ VkResult result);
+ bool PreCallValidateAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
+ VkFence fence, uint32_t* pImageIndex);
+ bool PreCallValidateAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex);
+ void PostCallRecordAcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore,
+ VkFence fence, uint32_t* pImageIndex, VkResult result);
+ void PostCallRecordAcquireNextImage2KHR(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex,
+ VkResult result);
+ void PostCallRecordEnumeratePhysicalDevices(VkInstance instance, uint32_t* pPhysicalDeviceCount,
+ VkPhysicalDevice* pPhysicalDevices, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties* pQueueFamilyProperties);
+ void PostCallRecordGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties* pQueueFamilyProperties);
+ bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+ void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+ bool PreCallValidateGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+ void PostCallRecordGetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
+ uint32_t* pQueueFamilyPropertyCount,
+ VkQueueFamilyProperties2KHR* pQueueFamilyProperties);
+ bool PreCallValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordValidateDestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator);
+ void PostCallRecordGetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ VkSurfaceCapabilitiesKHR* pSurfaceCapabilities, VkResult result);
+ void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ VkSurfaceCapabilities2KHR* pSurfaceCapabilities, VkResult result);
+ void PostCallRecordGetPhysicalDeviceSurfaceCapabilities2EXT(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ VkSurfaceCapabilities2EXT* pSurfaceCapabilities, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface, VkBool32* pSupported);
+ void PostCallRecordGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ VkSurfaceKHR surface, VkBool32* pSupported, VkResult result);
+ void PostCallRecordGetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes,
+ VkResult result);
+ bool PreCallValidateGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats);
+ void PostCallRecordGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
+ uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats,
+ VkResult result);
+ void PostCallRecordGetPhysicalDeviceSurfaceFormats2KHR(VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
+ uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats,
+ VkResult result);
+ void PostCallRecordCreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface,
+ VkResult result);
+ void PreCallRecordQueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
+ void PostCallRecordQueueEndDebugUtilsLabelEXT(VkQueue queue);
+ void PreCallRecordQueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo);
+ void PreCallRecordCmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
+ void PostCallRecordCmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer);
+ void PreCallRecordCmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo);
+ void PostCallRecordCreateDebugUtilsMessengerEXT(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger,
+ VkResult result);
+ void PostCallRecordDestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
+ const VkAllocationCallbacks* pAllocator);
+ void PostCallRecordDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
+ const VkAllocationCallbacks* pAllocator);
+ void PostCallRecordEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupPropertiesKHR* pPhysicalDeviceGroupProperties,
+ VkResult result);
+ void PostCallRecordEnumeratePhysicalDeviceGroupsKHR(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount,
+ VkPhysicalDeviceGroupPropertiesKHR* pPhysicalDeviceGroupProperties,
+ VkResult result);
+ bool PreCallValidateCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
+ void PostCallRecordCreateDescriptorUpdateTemplate(VkDevice device, const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate, VkResult result);
+ bool PreCallValidateCreateDescriptorUpdateTemplateKHR(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate);
+ void PostCallRecordCreateDescriptorUpdateTemplateKHR(VkDevice device,
+ const VkDescriptorUpdateTemplateCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkDescriptorUpdateTemplateKHR* pDescriptorUpdateTemplate, VkResult result);
+ void PreCallRecordDestroyDescriptorUpdateTemplate(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordDestroyDescriptorUpdateTemplateKHR(VkDevice device, VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+ void PreCallRecordUpdateDescriptorSetWithTemplate(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+ bool PreCallValidateUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ const void* pData);
+ void PreCallRecordUpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, const void* pData);
+
+ bool PreCallValidateCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkPipelineLayout layout, uint32_t set, const void* pData);
+ void PreCallRecordCmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
+ VkPipelineLayout layout, uint32_t set, const void* pData);
+ void PostCallRecordGetPhysicalDeviceDisplayPlanePropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
+ VkDisplayPlanePropertiesKHR* pProperties, VkResult result);
+ void PostCallRecordGetPhysicalDeviceDisplayPlaneProperties2KHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount,
+ VkDisplayPlaneProperties2KHR* pProperties, VkResult result);
+ bool PreCallValidateGetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
+ uint32_t* pDisplayCount, VkDisplayKHR* pDisplays);
+ bool PreCallValidateGetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex,
+ VkDisplayPlaneCapabilitiesKHR* pCapabilities);
+ bool PreCallValidateGetDisplayPlaneCapabilities2KHR(VkPhysicalDevice physicalDevice,
+ const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo,
+ VkDisplayPlaneCapabilities2KHR* pCapabilities);
+ bool PreCallValidateCmdDebugMarkerEndEXT(VkCommandBuffer commandBuffer);
+ bool PreCallValidateCmdSetDiscardRectangleEXT(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle,
+ uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles);
+ bool PreCallValidateCmdSetSampleLocationsEXT(VkCommandBuffer commandBuffer,
+ const VkSampleLocationsInfoEXT* pSampleLocationsInfo);
+ bool PreCallValidateCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ void PreCallRecordCmdDrawIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ void PreCallRecordCmdDrawIndexedIndirectCountKHR(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ bool PreCallValidateCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);
+ void PreCallRecordCmdDrawMeshTasksNV(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask);
+ bool PreCallValidateCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t drawCount, uint32_t stride);
+ void PreCallRecordCmdDrawMeshTasksIndirectNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ uint32_t drawCount, uint32_t stride);
+ bool PreCallValidateCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ void PreCallRecordCmdDrawMeshTasksIndirectCountNV(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset,
+ VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount,
+ uint32_t stride);
+ void PostCallRecordDestroySamplerYcbcrConversion(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks* pAllocator);
+ void PreCallRecordGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceProperties* pPhysicalDeviceProperties);
+ void PostCallRecordDestroySamplerYcbcrConversionKHR(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion,
+ const VkAllocationCallbacks* pAllocator);
+ bool PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT* pInfo);
#ifdef VK_USE_PLATFORM_ANDROID_KHR
-bool PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer* buffer,
- VkAndroidHardwareBufferPropertiesANDROID* pProperties);
-void PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer* buffer,
- VkAndroidHardwareBufferPropertiesANDROID* pProperties, VkResult result);
-bool PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
- struct AHardwareBuffer** pBuffer);
-void PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ bool PreCallValidateGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer* buffer,
+ VkAndroidHardwareBufferPropertiesANDROID* pProperties);
+ void PostCallRecordGetAndroidHardwareBufferProperties(VkDevice device, const struct AHardwareBuffer* buffer,
+ VkAndroidHardwareBufferPropertiesANDROID* pProperties, VkResult result);
+ bool PreCallValidateGetMemoryAndroidHardwareBuffer(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
+ struct AHardwareBuffer** pBuffer);
+ void PostCallRecordCreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_IOS_MVK
-void PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ void PostCallRecordCreateIOSSurfaceMVK(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_MACOS_MVK
-void PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ void PostCallRecordCreateMacOSSurfaceMVK(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
-bool PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- struct wl_display* display);
-void PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceWaylandPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ struct wl_display* display);
+ void PostCallRecordCreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
-bool PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
-void PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
+ void PostCallRecordCreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
-bool PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- xcb_connection_t* connection, xcb_visualid_t visual_id);
-void PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ xcb_connection_t* connection, xcb_visualid_t visual_id);
+ void PostCallRecordCreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
-bool PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
- Display* dpy, VisualID visualID);
-void PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
+ bool PreCallValidateGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
+ Display* dpy, VisualID visualID);
+ void PostCallRecordCreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface, VkResult result);
#endif // VK_USE_PLATFORM_XLIB_KHR
-}; // namespace core_validation
+}; // Class CoreChecks
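
(Note for readers new to the layer chassis: the declarations above become methods of the CoreChecks class because the generated chassis drives every Vulkan entry point through a fixed intercept sequence -- PreCallValidate on each registered validation object, then PreCallRecord, then the down-chain dispatch, then PostCallRecord. The following is a minimal, self-contained sketch of that flow for a single call; MiniValidationObject, DispatchCreateBuffer and WrapCreateBuffer are illustrative stand-ins, not the actual generated chassis code.)

    // Sketch of the chassis-style intercept pattern, assuming one hypothetical
    // validation object type; the real chassis generates one wrapper per
    // Vulkan entry point and iterates a list of registered objects.
    #include <vulkan/vulkan.h>
    #include <vector>

    struct MiniValidationObject {
        // Returning true requests that the down-chain call be skipped.
        virtual bool PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
                                                 const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) {
            return false;
        }
        virtual void PreCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
                                               const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) {}
        virtual void PostCallRecordCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
                                                const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer,
                                                VkResult result) {}
        virtual ~MiniValidationObject() = default;
    };

    // Stand-in for the down-chain call; the real chassis dispatches through a
    // per-device dispatch table to the next layer or the driver.
    static VkResult DispatchCreateBuffer(VkDevice device, const VkBufferCreateInfo* pCreateInfo,
                                         const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer) {
        return VK_SUCCESS;  // placeholder
    }

    // One wrapper: validate, record, dispatch, record again.
    VkResult WrapCreateBuffer(std::vector<MiniValidationObject*>& objects, VkDevice device,
                              const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator,
                              VkBuffer* pBuffer) {
        bool skip = false;
        for (auto* obj : objects) skip |= obj->PreCallValidateCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
        if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
        for (auto* obj : objects) obj->PreCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
        VkResult result = DispatchCreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
        for (auto* obj : objects) obj->PostCallRecordCreateBuffer(device, pCreateInfo, pAllocator, pBuffer, result);
        return result;
    }

In this patch the per-call wrappers live in the generated chassis.cpp, and CoreChecks supplies the PreCallValidate/PreCallRecord/PostCallRecord bodies declared above, which is why they move from free functions in the core_validation namespace into class methods.
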
diff --git a/layers/core_validation_types.h b/layers/core_validation_types.h
index 098eaa1..bed42da 100644
--- a/layers/core_validation_types.h
+++ b/layers/core_validation_types.h
@@ -49,6 +49,10 @@
#include "android_ndk_types.h"
#endif // VK_USE_PLATFORM_ANDROID_KHR
+class CoreChecks;
+typedef CoreChecks layer_data;
+typedef CoreChecks instance_layer_data;
+
// Fwd declarations -- including descriptor_set.h creates an ugly include loop
namespace cvdescriptorset {
class DescriptorSetLayoutDef;
@@ -92,7 +96,7 @@
}
template <typename Barrier, bool assume_transfer = false>
-static bool IsReleaseOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
+static bool TempIsReleaseOp(const COMMAND_POOL_NODE *pool, const Barrier *barrier) {
return (assume_transfer || IsTransferOp(barrier)) && (pool->queueFamilyIndex == barrier->srcQueueFamilyIndex);
}
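
(Note on the helper renamed to TempIsReleaseOp above: a queue-family ownership barrier is a release operation for the queue family named by srcQueueFamilyIndex and an acquire operation for the one named by dstQueueFamilyIndex. The sketch below illustrates that classification in isolation; OwnershipOp and ClassifyOwnershipOp are stand-in names rather than the layer's real types, and the transfer test is deliberately reduced to the common case.)

    #include <vulkan/vulkan.h>

    // Classify a buffer memory barrier recorded in a command pool created for
    // queue family `poolQueueFamily`. Mirrors the idea behind TempIsReleaseOp:
    // an ownership-transfer barrier is a release when the recording queue
    // family is the source family, an acquire when it is the destination.
    enum class OwnershipOp { kNone, kRelease, kAcquire };

    static OwnershipOp ClassifyOwnershipOp(uint32_t poolQueueFamily, const VkBufferMemoryBarrier& barrier) {
        const bool is_transfer = barrier.srcQueueFamilyIndex != barrier.dstQueueFamilyIndex &&
                                 barrier.srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
                                 barrier.dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED;
        if (!is_transfer) return OwnershipOp::kNone;
        if (poolQueueFamily == barrier.srcQueueFamilyIndex) return OwnershipOp::kRelease;
        if (poolQueueFamily == barrier.dstQueueFamilyIndex) return OwnershipOp::kAcquire;
        return OwnershipOp::kNone;
    }
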
@@ -552,14 +556,6 @@
// clang-format on
};
-struct TEMPLATE_STATE {
- VkDescriptorUpdateTemplateKHR desc_update_template;
- safe_VkDescriptorUpdateTemplateCreateInfo create_info;
-
- TEMPLATE_STATE(VkDescriptorUpdateTemplateKHR update_template, safe_VkDescriptorUpdateTemplateCreateInfo *pCreateInfo)
- : desc_update_template(update_template), create_info(*pCreateInfo) {}
-};
-
struct QueryObject {
VkQueryPool pool;
uint32_t index;
@@ -1085,53 +1081,6 @@
VkFormat format;
};
-// CHECK_DISABLED struct is a container for bools that can block validation checks from being performed.
-// The end goal is to have all checks guarded by a bool. The bools are all "false" by default meaning that all checks
-// are enabled. At CreateInstance time, the user can use the VK_EXT_validation_flags extension to pass in enum values
-// of VkValidationCheckEXT that will selectively disable checks.
-// The VK_EXT_validation_features extension can also be used with the VkValidationFeaturesEXT structure to set
-// disables in the CHECK_DISABLED struct and/or enables in the CHECK_ENABLED struct.
-struct CHECK_DISABLED {
- bool command_buffer_state;
- bool create_descriptor_set_layout;
- bool destroy_buffer_view; // Skip validation at DestroyBufferView time
- bool destroy_image_view; // Skip validation at DestroyImageView time
- bool destroy_pipeline; // Skip validation at DestroyPipeline time
- bool destroy_descriptor_pool; // Skip validation at DestroyDescriptorPool time
- bool destroy_framebuffer; // Skip validation at DestroyFramebuffer time
- bool destroy_renderpass; // Skip validation at DestroyRenderpass time
- bool destroy_image; // Skip validation at DestroyImage time
- bool destroy_sampler; // Skip validation at DestroySampler time
- bool destroy_command_pool; // Skip validation at DestroyCommandPool time
- bool destroy_event; // Skip validation at DestroyEvent time
- bool free_memory; // Skip validation at FreeMemory time
- bool object_in_use; // Skip all object in_use checking
- bool idle_descriptor_set; // Skip check to verify that descriptor set is no in-use
- bool push_constant_range; // Skip push constant range checks
- bool free_descriptor_sets; // Skip validation prior to vkFreeDescriptorSets()
- bool allocate_descriptor_sets; // Skip validation prior to vkAllocateDescriptorSets()
- bool update_descriptor_sets; // Skip validation prior to vkUpdateDescriptorSets()
- bool wait_for_fences;
- bool get_fence_state;
- bool queue_wait_idle;
- bool device_wait_idle;
- bool destroy_fence;
- bool destroy_semaphore;
- bool destroy_query_pool;
- bool get_query_pool_results;
- bool destroy_buffer;
- bool shader_validation; // Skip validation for shaders
-
- void SetAll(bool value) { std::fill(&command_buffer_state, &shader_validation + 1, value); }
-};
-
-struct CHECK_ENABLED {
- bool gpu_validation;
- bool gpu_validation_reserve_binding_slot;
-
- void SetAll(bool value) { std::fill(&gpu_validation, &gpu_validation_reserve_binding_slot + 1, value); }
-};
-
struct MT_FB_ATTACHMENT_INFO {
IMAGE_VIEW_STATE *view_state;
VkImage image;
@@ -1194,95 +1143,12 @@
VkCommandBuffer barrier_command_buffer;
};
-// Fwd declarations of layer_data and helpers to look-up/validate state from layer_data maps
-namespace core_validation {
-struct layer_data;
-cvdescriptorset::DescriptorSet *GetSetNode(const layer_data *, VkDescriptorSet);
+enum BarrierOperationsType {
+ kAllAcquire, // All Barrier operations are "ownership acquire" operations
+ kAllRelease, // All Barrier operations are "ownership release" operations
+ kGeneral, // Either no ownership operations or a mix of ownership operation types and/or non-ownership operations
+};
+
std::shared_ptr<cvdescriptorset::DescriptorSetLayout const> const GetDescriptorSetLayout(layer_data const *, VkDescriptorSetLayout);
-DESCRIPTOR_POOL_STATE *GetDescriptorPoolState(const layer_data *, const VkDescriptorPool);
-BUFFER_STATE *GetBufferState(const layer_data *, VkBuffer);
-IMAGE_STATE *GetImageState(const layer_data *, VkImage);
-DEVICE_MEM_INFO *GetMemObjInfo(const layer_data *, VkDeviceMemory);
-BUFFER_VIEW_STATE *GetBufferViewState(const layer_data *, VkBufferView);
-SAMPLER_STATE *GetSamplerState(const layer_data *, VkSampler);
-IMAGE_VIEW_STATE *GetAttachmentImageViewState(layer_data *dev_data, FRAMEBUFFER_STATE *framebuffer, uint32_t index);
-IMAGE_VIEW_STATE *GetImageViewState(const layer_data *, VkImageView);
-SWAPCHAIN_NODE *GetSwapchainNode(const layer_data *, VkSwapchainKHR);
-GLOBAL_CB_NODE *GetCBNode(layer_data const *my_data, const VkCommandBuffer cb);
-PIPELINE_STATE *GetPipelineState(layer_data const *dev_data, VkPipeline pipeline);
-RENDER_PASS_STATE *GetRenderPassState(layer_data const *dev_data, VkRenderPass renderpass);
-std::shared_ptr<RENDER_PASS_STATE> GetRenderPassStateSharedPtr(layer_data const *dev_data, VkRenderPass renderpass);
-FRAMEBUFFER_STATE *GetFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer);
-COMMAND_POOL_NODE *GetCommandPoolNode(layer_data *dev_data, VkCommandPool pool);
-shader_module const *GetShaderModuleState(layer_data const *dev_data, VkShaderModule module);
-const DeviceFeatures *GetEnabledFeatures(const layer_data *device_data);
-const DeviceExtensions *GetEnabledExtensions(const layer_data *device_data);
-
-void InvalidateCommandBuffers(const layer_data *, std::unordered_set<GLOBAL_CB_NODE *> const &, VK_OBJECT);
-bool ValidateMemoryIsBoundToBuffer(const layer_data *, const BUFFER_STATE *, const char *, const char *);
-bool ValidateMemoryIsBoundToImage(const layer_data *, const IMAGE_STATE *, const char *, const char *);
-void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *, SAMPLER_STATE *);
-void AddCommandBufferBindingImage(const layer_data *, GLOBAL_CB_NODE *, IMAGE_STATE *);
-void AddCommandBufferBindingImageView(const layer_data *, GLOBAL_CB_NODE *, IMAGE_VIEW_STATE *);
-void AddCommandBufferBindingBuffer(const layer_data *, GLOBAL_CB_NODE *, BUFFER_STATE *);
-void AddCommandBufferBindingBufferView(const layer_data *, GLOBAL_CB_NODE *, BUFFER_VIEW_STATE *);
-bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct, const char *caller_name,
- const char *error_code);
-void InvalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj);
-void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info);
-void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info);
-void ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VulkanObjectType type);
-bool ValidateCmdQueueFlags(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *caller_name, VkQueueFlags flags,
- const char *error_code);
-bool ValidateCmd(layer_data *my_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name);
-bool InsideRenderPass(const layer_data *my_data, const GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode);
-void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid);
-bool OutsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName, const char *msgCode);
-void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node);
-void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout);
-bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName);
-bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
- const char *location, const std::string &msgCode);
-bool RangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end);
-bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName);
-void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid);
-bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type);
-bool ValidateCmd(layer_data *dev_data, const GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd, const char *caller_name);
-
-// Prototypes for layer_data accessor functions. These should be in their own header file at some point
-VkFormatProperties GetPDFormatProperties(const core_validation::layer_data *device_data, const VkFormat format);
-VkResult GetPDImageFormatProperties(core_validation::layer_data *, const VkImageCreateInfo *, VkImageFormatProperties *);
-VkResult GetPDImageFormatProperties2(core_validation::layer_data *, const VkPhysicalDeviceImageFormatInfo2 *,
- VkImageFormatProperties2 *);
-const debug_report_data *GetReportData(const layer_data *);
-const VkLayerDispatchTable *GetDispatchTable(const layer_data *);
-const VkPhysicalDeviceProperties *GetPDProperties(const layer_data *);
-const VkPhysicalDeviceMemoryProperties *GetPhysicalDeviceMemoryProperties(const layer_data *);
-const CHECK_DISABLED *GetDisables(layer_data *);
-const CHECK_ENABLED *GetEnables(layer_data *);
-std::unordered_map<VkImage, std::unique_ptr<IMAGE_STATE>> *GetImageMap(core_validation::layer_data *);
-std::unordered_map<VkImage, std::vector<ImageSubresourcePair>> *GetImageSubresourceMap(layer_data *);
-std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> *GetImageLayoutMap(layer_data *);
-std::unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> const *GetImageLayoutMap(layer_data const *);
-std::unordered_map<VkBuffer, std::unique_ptr<BUFFER_STATE>> *GetBufferMap(layer_data *device_data);
-std::unordered_map<VkBufferView, std::unique_ptr<BUFFER_VIEW_STATE>> *GetBufferViewMap(layer_data *device_data);
-std::unordered_map<VkImageView, std::unique_ptr<IMAGE_VIEW_STATE>> *GetImageViewMap(layer_data *device_data);
-std::unordered_map<VkSamplerYcbcrConversion, uint64_t> *GetYcbcrConversionFormatMap(layer_data *);
-std::unordered_set<uint64_t> *GetAHBExternalFormatsSet(layer_data *);
-
-const DeviceExtensions *GetDeviceExtensions(const layer_data *);
-GpuValidationState *GetGpuValidationState(layer_data *);
-const GpuValidationState *GetGpuValidationState(const layer_data *);
-VkDevice GetDevice(const layer_data *);
-
-uint32_t GetApiVersion(const layer_data *);
-
-VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue);
-
-GlobalQFOTransferBarrierMap<VkImageMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
- layer_data *dev_data, const QFOTransferBarrier<VkImageMemoryBarrier>::Tag &type_tag);
-GlobalQFOTransferBarrierMap<VkBufferMemoryBarrier> &GetGlobalQFOReleaseBarrierMap(
- layer_data *dev_data, const QFOTransferBarrier<VkBufferMemoryBarrier>::Tag &type_tag);
-} // namespace core_validation
#endif // CORE_VALIDATION_TYPES_H_
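
The block deleted here is the core of the chassis conversion: the free functions in namespace core_validation that every caller had to hand a layer_data pointer become member functions of a single validation object, so per-device state is reached through this rather than through per-function map lookups. A minimal sketch of the pattern with hypothetical names (ValidationObject, WidgetState and GetWidgetState stand in for CoreChecks and its state accessors):

    #include <cstdint>
    #include <unordered_map>

    struct WidgetState { int uses = 0; };

    // Before: a free function that needs the device's state handed in explicitly, e.g.
    //   WidgetState *GetWidgetState(const DeviceData *dev, uint64_t handle);
    // After: the state map and its accessors live on one object per device.
    class ValidationObject {
      public:
        WidgetState *GetWidgetState(uint64_t handle) {
            auto it = widget_map_.find(handle);
            return (it == widget_map_.end()) ? nullptr : &it->second;
        }
        bool ValidateWidget(uint64_t handle) {
            // Member functions reach state through 'this'; no map-of-maps lookup needed.
            return GetWidgetState(handle) != nullptr;
        }
        std::unordered_map<uint64_t, WidgetState> widget_map_;
    };

    int main() {
        ValidationObject dev_state;
        dev_state.widget_map_[42] = WidgetState{};
        return dev_state.ValidateWidget(42) ? 0 : 1;
    }
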
diff --git a/layers/descriptor_sets.cpp b/layers/descriptor_sets.cpp
index cd9a824..6c2d7c8 100644
--- a/layers/descriptor_sets.cpp
+++ b/layers/descriptor_sets.cpp
@@ -22,6 +22,7 @@
// Allow use of STL min and max functions in Windows
#define NOMINMAX
+#include "chassis.h"
#include "core_validation_error_enums.h"
#include "core_validation.h"
#include "descriptor_sets.h"
@@ -598,7 +599,7 @@
device_data_(dev_data),
limits_(dev_data->phys_dev_props.limits),
variable_count_(variable_count) {
- pool_state_ = GetDescriptorPoolState(dev_data, pool);
+ pool_state_ = dev_data->GetDescriptorPoolState(dev_data, pool);
// Foreach binding, create default descriptors of given type
descriptors_.reserve(p_layout_->GetTotalDescriptorCount());
for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) {
@@ -739,7 +740,7 @@
if (descriptor_class == GeneralBuffer) {
// Verify that buffers are valid
auto buffer = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetBuffer();
- auto buffer_node = GetBufferState(device_data_, buffer);
+ auto buffer_node = device_data_->GetBufferState(device_data_, buffer);
if (!buffer_node) {
std::stringstream error_str;
error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
@@ -748,7 +749,7 @@
return false;
} else if (!buffer_node->sparse) {
for (auto mem_binding : buffer_node->GetBoundMemory()) {
- if (!GetMemObjInfo(device_data_, mem_binding)) {
+ if (!device_data_->GetMemObjInfo(device_data_, mem_binding)) {
std::stringstream error_str;
error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
<< " uses buffer " << buffer << " that references invalid memory " << mem_binding << ".";
@@ -797,7 +798,7 @@
}
auto reqs = binding_pair.second;
- auto image_view_state = GetImageViewState(device_data_, image_view);
+ auto image_view_state = device_data_->GetImageViewState(device_data_, image_view);
if (nullptr == image_view_state) {
// Image view must have been destroyed since initial update. Could potentially flag the descriptor
// as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time
@@ -830,7 +831,7 @@
return false;
}
- auto image_node = GetImageState(device_data_, image_view_ci.image);
+ auto image_node = device_data_->GetImageState(device_data_, image_view_ci.image);
assert(image_node);
// Verify Image Layout
// Copy first mip level into sub_layers and loop over each mip level to verify layout
@@ -843,8 +844,9 @@
cur_level < image_view_ci.subresourceRange.levelCount; ++cur_level) {
sub_layers.mipLevel = cur_level;
// No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED.
- VerifyImageLayout(device_data_, cb_node, image_node, sub_layers, image_layout, VK_IMAGE_LAYOUT_UNDEFINED,
- caller, kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error);
+ device_data_->VerifyImageLayout(device_data_, cb_node, image_node, sub_layers, image_layout,
+ VK_IMAGE_LAYOUT_UNDEFINED, caller, kVUIDUndefined,
+ "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error);
if (hit_error) {
*error =
"Image layout specified at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time "
@@ -871,7 +873,7 @@
}
} else if (descriptor_class == TexelBuffer) {
auto texel_buffer = static_cast<TexelDescriptor *>(descriptors_[i].get());
- auto buffer_view = GetBufferViewState(device_data_, texel_buffer->GetBufferView());
+ auto buffer_view = device_data_->GetBufferViewState(device_data_, texel_buffer->GetBufferView());
auto reqs = binding_pair.second;
auto format_bits = DescriptorRequirementsBitsFromFormat(buffer_view->create_info.format);
@@ -932,7 +934,7 @@
for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) {
if (descriptors_[start_idx + i]->updated) {
auto bufferview = static_cast<TexelDescriptor *>(descriptors_[start_idx + i].get())->GetBufferView();
- auto bv_state = GetBufferViewState(device_data_, bufferview);
+ auto bv_state = device_data_->GetBufferViewState(device_data_, bufferview);
if (bv_state) {
buffer_set->insert(bv_state->create_info.buffer);
num_updates++;
@@ -953,7 +955,7 @@
}
// Set is being deleted or updated so invalidate all bound cmd buffers
void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers() {
- core_validation::InvalidateCommandBuffers(device_data_, cb_bindings, {HandleToUint64(set_), kVulkanObjectTypeDescriptorSet});
+ device_data_->InvalidateCommandBuffers(device_data_, cb_bindings, {HandleToUint64(set_), kVulkanObjectTypeDescriptorSet});
}
// Loop through the write updates to do for a push descriptor set, ignoring dstSet
@@ -1315,15 +1317,15 @@
}
}
// Validate given sampler. Currently this only checks to make sure it exists in the samplerMap
-bool cvdescriptorset::ValidateSampler(const VkSampler sampler, const layer_data *dev_data) {
- return (GetSamplerState(dev_data, sampler) != nullptr);
+bool cvdescriptorset::ValidateSampler(const VkSampler sampler, layer_data *dev_data) {
+ return (dev_data->GetSamplerState(dev_data, sampler) != nullptr);
}
bool cvdescriptorset::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type,
- const layer_data *dev_data, const char *func_name, std::string *error_code,
+ layer_data *dev_data, const char *func_name, std::string *error_code,
std::string *error_msg) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326";
- auto iv_state = GetImageViewState(dev_data, image_view);
+ auto iv_state = dev_data->GetImageViewState(dev_data, image_view);
if (!iv_state) {
std::stringstream error_str;
error_str << "Invalid VkImageView: " << image_view;
@@ -1337,14 +1339,14 @@
VkImage image = iv_state->create_info.image;
VkFormat format = VK_FORMAT_MAX_ENUM;
VkImageUsageFlags usage = 0;
- auto image_node = GetImageState(dev_data, image);
+ auto image_node = dev_data->GetImageState(dev_data, image);
if (image_node) {
format = image_node->createInfo.format;
usage = image_node->createInfo.usage;
// Validate that memory is bound to image
// TODO: This should have its own valid usage id apart from 2524, which is from the CreateImageView case. The only way
// the error here occurs is if memory bound to a created imageView has been freed.
- if (ValidateMemoryIsBoundToImage(dev_data, image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) {
+ if (dev_data->ValidateMemoryIsBoundToImage(dev_data, image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) {
*error_code = "VUID-VkImageViewCreateInfo-image-01020";
*error_msg = "No memory bound to image.";
return false;
@@ -1518,7 +1520,7 @@
{VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2},
{VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}}};
auto is_layout = [image_layout, dev_data](const ExtensionLayout &ext_layout) {
- return dev_data->extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout);
+ return dev_data->device_extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout);
};
bool valid_layout = (std::find(valid_layouts.cbegin(), valid_layouts.cend(), image_layout) != valid_layouts.cend()) ||
@@ -1532,7 +1534,7 @@
<< ". Allowed layouts are: VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, "
<< "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL";
for (auto &ext_layout : extended_layouts) {
- if (dev_data->extensions.*(ext_layout.extension)) {
+ if (dev_data->device_extensions.*(ext_layout.extension)) {
error_str << ", " << string_VkImageLayout(ext_layout.layout);
}
}
@@ -1561,8 +1563,8 @@
void cvdescriptorset::SamplerDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
if (!immutable_) {
- auto sampler_state = GetSamplerState(dev_data, sampler_);
- if (sampler_state) core_validation::AddCommandBufferBindingSampler(cb_node, sampler_state);
+ auto sampler_state = dev_data->GetSamplerState(dev_data, sampler_);
+ if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state);
}
}
@@ -1601,16 +1603,16 @@
void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
// First add binding for any non-immutable sampler
if (!immutable_) {
- auto sampler_state = GetSamplerState(dev_data, sampler_);
- if (sampler_state) core_validation::AddCommandBufferBindingSampler(cb_node, sampler_state);
+ auto sampler_state = dev_data->GetSamplerState(dev_data, sampler_);
+ if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state);
}
// Add binding for image
- auto iv_state = GetImageViewState(dev_data, image_view_);
+ auto iv_state = dev_data->GetImageViewState(dev_data, image_view_);
if (iv_state) {
- core_validation::AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
+ dev_data->AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
}
if (image_view_) {
- SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
+ dev_data->SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
}
}
@@ -1638,12 +1640,12 @@
void cvdescriptorset::ImageDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
// Add binding for image
- auto iv_state = GetImageViewState(dev_data, image_view_);
+ auto iv_state = dev_data->GetImageViewState(dev_data, image_view_);
if (iv_state) {
- core_validation::AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
+ dev_data->AddCommandBufferBindingImageView(dev_data, cb_node, iv_state);
}
if (image_view_) {
- SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
+ dev_data->SetImageViewLayout(dev_data, cb_node, image_view_, image_layout_);
}
}
@@ -1677,8 +1679,8 @@
}
void cvdescriptorset::BufferDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
- auto buffer_node = GetBufferState(dev_data, buffer_);
- if (buffer_node) core_validation::AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
+ auto buffer_node = dev_data->GetBufferState(dev_data, buffer_);
+ if (buffer_node) dev_data->AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_node);
}
cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type) : buffer_view_(VK_NULL_HANDLE), storage_(false) {
@@ -1698,9 +1700,9 @@
}
void cvdescriptorset::TexelDescriptor::UpdateDrawState(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
- auto bv_state = GetBufferViewState(dev_data, buffer_view_);
+ auto bv_state = dev_data->GetBufferViewState(dev_data, buffer_view_);
if (bv_state) {
- core_validation::AddCommandBufferBindingBufferView(dev_data, cb_node, bv_state);
+ dev_data->AddCommandBufferBindingBufferView(dev_data, cb_node, bv_state);
}
}
@@ -1709,14 +1711,14 @@
// If the update hits an issue for which the callback returns "true", meaning that the call down the chain should
// be skipped, then true is returned.
// If there is no issue with the update, then false is returned.
-bool cvdescriptorset::ValidateUpdateDescriptorSets(const debug_report_data *report_data, const layer_data *dev_data,
- uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count,
- const VkCopyDescriptorSet *p_cds, const char *func_name) {
+bool CoreChecks::ValidateUpdateDescriptorSets(const debug_report_data *report_data, const layer_data *dev_data,
+ uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count,
+ const VkCopyDescriptorSet *p_cds, const char *func_name) {
bool skip = false;
// Validate Write updates
for (uint32_t i = 0; i < write_count; i++) {
auto dest_set = p_wds[i].dstSet;
- auto set_node = core_validation::GetSetNode(dev_data, dest_set);
+ auto set_node = GetSetNode(dev_data, dest_set);
if (!set_node) {
skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
HandleToUint64(dest_set), kVUID_Core_DrawState_InvalidDescriptorSet,
@@ -1737,8 +1739,8 @@
for (uint32_t i = 0; i < copy_count; ++i) {
auto dst_set = p_cds[i].dstSet;
auto src_set = p_cds[i].srcSet;
- auto src_node = core_validation::GetSetNode(dev_data, src_set);
- auto dst_node = core_validation::GetSetNode(dev_data, dst_set);
+ auto src_node = GetSetNode(dev_data, src_set);
+ auto dst_node = GetSetNode(dev_data, dst_set);
// Object_tracker verifies that src & dest descriptor set are valid
assert(src_node);
assert(dst_node);
@@ -1759,14 +1761,13 @@
// with the same set of updates.
// This is split from the validate code to allow validation prior to calling down the chain, and then update after
// calling down the chain.
-void cvdescriptorset::PerformUpdateDescriptorSets(const layer_data *dev_data, uint32_t write_count,
- const VkWriteDescriptorSet *p_wds, uint32_t copy_count,
- const VkCopyDescriptorSet *p_cds) {
+void cvdescriptorset::PerformUpdateDescriptorSets(layer_data *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds,
+ uint32_t copy_count, const VkCopyDescriptorSet *p_cds) {
// Write updates first
uint32_t i = 0;
for (i = 0; i < write_count; ++i) {
auto dest_set = p_wds[i].dstSet;
- auto set_node = core_validation::GetSetNode(dev_data, dest_set);
+ auto set_node = dev_data->GetSetNode(dev_data, dest_set);
if (set_node) {
set_node->PerformWriteUpdate(&p_wds[i]);
}
@@ -1775,8 +1776,8 @@
for (i = 0; i < copy_count; ++i) {
auto dst_set = p_cds[i].dstSet;
auto src_set = p_cds[i].srcSet;
- auto src_node = core_validation::GetSetNode(dev_data, src_set);
- auto dst_node = core_validation::GetSetNode(dev_data, dst_set);
+ auto src_node = dev_data->GetSetNode(dev_data, src_set);
+ auto dst_node = dev_data->GetSetNode(dev_data, dst_set);
if (src_node && dst_node) {
dst_node->PerformCopyUpdate(&p_cds[i], src_node);
}
@@ -1862,21 +1863,21 @@
}
// These helper functions carry out the validate and record descriptor updates performed via update templates. They decode
// the templatized data and leverage the non-template UpdateDescriptor helper functions.
-bool cvdescriptorset::ValidateUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
- const TEMPLATE_STATE *template_state, const void *pData) {
+bool CoreChecks::ValidateUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
+ const TEMPLATE_STATE *template_state, const void *pData) {
// Translate the templated update into a normal update for validation...
- DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
+ cvdescriptorset::DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
return ValidateUpdateDescriptorSets(GetReportData(device_data), device_data,
static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(),
0, NULL, "vkUpdateDescriptorSetWithTemplate()");
}
-void cvdescriptorset::PerformUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
- const TEMPLATE_STATE *template_state, const void *pData) {
+void CoreChecks::PerformUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
+ const TEMPLATE_STATE *template_state, const void *pData) {
// Translate the templated update into a normal update for validation...
- DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
- PerformUpdateDescriptorSets(device_data, static_cast<uint32_t>(decoded_update.desc_writes.size()),
- decoded_update.desc_writes.data(), 0, NULL);
+ cvdescriptorset::DecodedTemplateUpdate decoded_update(device_data, descriptorSet, template_state, pData);
+ cvdescriptorset::PerformUpdateDescriptorSets(device_data, static_cast<uint32_t>(decoded_update.desc_writes.size()),
+ decoded_update.desc_writes.data(), 0, NULL);
}
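
Both template wrappers above follow the same decode-then-delegate shape: expand the packed template payload into ordinary VkWriteDescriptorSet entries once, then reuse the shared validate/record paths. A schematic sketch of that shape; DecodeTemplate, ValidateWrites and RecordWrites are stubbed stand-ins, and the real DecodedTemplateUpdate walks VkDescriptorUpdateTemplateEntry offsets and strides, which is omitted here:

    #include <vulkan/vulkan.h>
    #include <vector>

    // Hypothetical stand-ins for the shared non-template validate/record paths.
    static bool ValidateWrites(const std::vector<VkWriteDescriptorSet> &) { return false; }  // no error found
    static void RecordWrites(const std::vector<VkWriteDescriptorSet> &) {}

    // Hypothetical decoder: the real code builds these writes from the template's entries.
    static std::vector<VkWriteDescriptorSet> DecodeTemplate(VkDescriptorSet /*set*/, const void * /*pData*/) { return {}; }

    static bool ValidateWithTemplate(VkDescriptorSet set, const void *pData) {
        const auto writes = DecodeTemplate(set, pData);  // decode once...
        return ValidateWrites(writes);                   // ...then reuse the non-template path
    }

    static void RecordWithTemplate(VkDescriptorSet set, const void *pData) {
        const auto writes = DecodeTemplate(set, pData);
        RecordWrites(writes);
    }

    int main() {
        VkDescriptorSet set = VK_NULL_HANDLE;
        RecordWithTemplate(set, nullptr);
        return ValidateWithTemplate(set, nullptr) ? 1 : 0;
    }
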
std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const {
@@ -2092,10 +2093,11 @@
const char *func_name, std::string *error_code,
std::string *error_msg) const {
// First make sure that buffer is valid
- auto buffer_node = GetBufferState(device_data_, buffer_info->buffer);
+ auto buffer_node = device_data_->GetBufferState(device_data_, buffer_info->buffer);
// Any invalid buffer should already be caught by object_tracker
assert(buffer_node);
- if (ValidateMemoryIsBoundToBuffer(device_data_, buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) {
+ if (device_data_->ValidateMemoryIsBoundToBuffer(device_data_, buffer_node, func_name,
+ "VUID-VkWriteDescriptorSet-descriptorType-00329")) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329";
*error_msg = "No memory bound to buffer.";
return false;
@@ -2192,11 +2194,11 @@
*error_msg = error_str.str();
return false;
}
- if (GetDeviceExtensions(device_data_)->vk_khr_sampler_ycbcr_conversion) {
+ if (device_data_->GetDeviceExtensions(device_data_)->vk_khr_sampler_ycbcr_conversion) {
ImageSamplerDescriptor *desc = (ImageSamplerDescriptor *)descriptors_[index].get();
if (desc->IsImmutableSampler()) {
- auto sampler_state = GetSamplerState(device_data_, desc->GetSampler());
- auto iv_state = GetImageViewState(device_data_, image_view);
+ auto sampler_state = device_data_->GetSamplerState(device_data_, desc->GetSampler());
+ auto iv_state = device_data_->GetImageViewState(device_data_, image_view);
if (iv_state && sampler_state) {
if (iv_state->samplerConversion != sampler_state->samplerConversion) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-01948";
@@ -2250,7 +2252,7 @@
case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
for (uint32_t di = 0; di < update->descriptorCount; ++di) {
auto buffer_view = update->pTexelBufferView[di];
- auto bv_state = GetBufferViewState(device_data_, buffer_view);
+ auto bv_state = device_data_->GetBufferViewState(device_data_, buffer_view);
if (!bv_state) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
std::stringstream error_str;
@@ -2259,7 +2261,7 @@
return false;
}
auto buffer = bv_state->create_info.buffer;
- auto buffer_state = GetBufferState(device_data_, buffer);
+ auto buffer_state = device_data_->GetBufferState(device_data_, buffer);
// Verify that buffer underlying the view hasn't been destroyed prematurely
if (!buffer_state) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
@@ -2380,7 +2382,7 @@
const auto src_desc = src_set->descriptors_[index + di].get();
if (!src_desc->updated) continue;
auto buffer_view = static_cast<TexelDescriptor *>(src_desc)->GetBufferView();
- auto bv_state = GetBufferViewState(device_data_, buffer_view);
+ auto bv_state = device_data_->GetBufferViewState(device_data_, buffer_view);
if (!bv_state) {
*error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323";
std::stringstream error_str;
@@ -2389,7 +2391,7 @@
return false;
}
auto buffer = bv_state->create_info.buffer;
- if (!ValidateBufferUsage(GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
+ if (!ValidateBufferUsage(device_data_->GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
std::stringstream error_str;
error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str();
*error_msg = error_str.str();
@@ -2403,7 +2405,7 @@
const auto src_desc = src_set->descriptors_[index + di].get();
if (!src_desc->updated) continue;
auto buffer = static_cast<BufferDescriptor *>(src_desc)->GetBuffer();
- if (!ValidateBufferUsage(GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
+ if (!ValidateBufferUsage(device_data_->GetBufferState(device_data_, buffer), type, error_code, error_msg)) {
std::stringstream error_str;
error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str();
*error_msg = error_str.str();
@@ -2423,8 +2425,8 @@
return true;
}
// Update the common AllocateDescriptorSetsData
-void cvdescriptorset::UpdateAllocateDescriptorSetsData(const layer_data *dev_data, const VkDescriptorSetAllocateInfo *p_alloc_info,
- AllocateDescriptorSetsData *ds_data) {
+void CoreChecks::UpdateAllocateDescriptorSetsData(const layer_data *dev_data, const VkDescriptorSetAllocateInfo *p_alloc_info,
+ cvdescriptorset::AllocateDescriptorSetsData *ds_data) {
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
auto layout = GetDescriptorSetLayout(dev_data, p_alloc_info->pSetLayouts[i]);
if (layout) {
@@ -2440,11 +2442,10 @@
}
}
// Verify that the state at allocate time is correct, but don't actually allocate the sets yet
-bool cvdescriptorset::ValidateAllocateDescriptorSets(const core_validation::layer_data *dev_data,
- const VkDescriptorSetAllocateInfo *p_alloc_info,
- const AllocateDescriptorSetsData *ds_data) {
+bool CoreChecks::ValidateAllocateDescriptorSets(const layer_data *dev_data, const VkDescriptorSetAllocateInfo *p_alloc_info,
+ const cvdescriptorset::AllocateDescriptorSetsData *ds_data) {
bool skip = false;
- auto report_data = core_validation::GetReportData(dev_data);
+ auto report_data = GetReportData(dev_data);
auto pool_state = GetDescriptorPoolState(dev_data, p_alloc_info->descriptorPool);
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
@@ -2518,12 +2519,12 @@
return skip;
}
// Decrement allocated sets from the pool and insert new sets into set_map
-void cvdescriptorset::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info,
- const VkDescriptorSet *descriptor_sets,
- const AllocateDescriptorSetsData *ds_data,
- std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> *pool_map,
- std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *set_map,
- layer_data *dev_data) {
+void CoreChecks::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info,
+ const VkDescriptorSet *descriptor_sets,
+ const cvdescriptorset::AllocateDescriptorSetsData *ds_data,
+ std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> *pool_map,
+ std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *set_map,
+ layer_data *dev_data) {
auto pool_state = (*pool_map)[p_alloc_info->descriptorPool];
// Account for sets and individual descriptors allocated from pool
pool_state->availableSets -= p_alloc_info->descriptorSetCount;
@@ -2539,7 +2540,7 @@
uint32_t variable_count = variable_count_valid ? variable_count_info->pDescriptorCounts[i] : 0;
auto new_ds = new cvdescriptorset::DescriptorSet(descriptor_sets[i], p_alloc_info->descriptorPool, ds_data->layout_nodes[i],
- variable_count, dev_data);
+ variable_count, this);
pool_state->sets.insert(new_ds);
new_ds->in_use.store(0);
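
PerformAllocateDescriptorSets, now a CoreChecks member, is pure bookkeeping: it debits the pool's remaining set and per-type descriptor budget and registers each new set in the set map. A condensed sketch of that accounting with simplified stand-in types (PoolState, SetState and PerformAllocate are illustrative, not the layer's structures):

    #include <cstdint>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    struct SetState { uint64_t handle; };
    struct PoolState {
        uint32_t availableSets = 8;
        std::unordered_map<uint32_t, uint32_t> availableDescriptorTypeCount;  // per descriptor type
        std::unordered_set<SetState *> sets;
    };

    // Debit the pool and record the new sets; validation is assumed to have passed already.
    static void PerformAllocate(PoolState *pool, const std::vector<uint64_t> &new_handles,
                                const std::unordered_map<uint32_t, uint32_t> &required_per_type,
                                std::unordered_map<uint64_t, SetState *> *set_map) {
        pool->availableSets -= static_cast<uint32_t>(new_handles.size());
        for (const auto &kv : required_per_type) pool->availableDescriptorTypeCount[kv.first] -= kv.second;
        for (uint64_t h : new_handles) {
            SetState *state = new SetState{h};  // tracked by the pool, freed when the pool is reset/destroyed
            pool->sets.insert(state);
            (*set_map)[h] = state;
        }
    }

    int main() {
        PoolState pool;
        pool.availableDescriptorTypeCount[0] = 16;
        std::unordered_map<uint64_t, SetState *> set_map;
        PerformAllocate(&pool, {1, 2}, {{0, 4}}, &set_map);
        return (pool.availableSets == 6 && pool.availableDescriptorTypeCount[0] == 12 && set_map.size() == 2) ? 0 : 1;
    }
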
diff --git a/layers/descriptor_sets.h b/layers/descriptor_sets.h
index 9522157..012a178 100644
--- a/layers/descriptor_sets.h
+++ b/layers/descriptor_sets.h
@@ -34,7 +34,8 @@
#include <unordered_set>
#include <vector>
-using core_validation::layer_data;
+class CoreChecks;
+typedef CoreChecks layer_data;
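
The forward declaration plus typedef keeps every existing layer_data * spelling in this header compiling without pulling in the full chassis header: an incomplete class type is enough for pointers and references in declarations. A tiny sketch of the idiom with illustrative names (Widget, widget_handle, UseWidget):

    #include <iostream>

    // "Header" side: an incomplete type is enough for pointer-only declarations.
    class Widget;
    typedef Widget widget_handle;
    void UseWidget(widget_handle *w);

    // "Implementation" side: the full definition is needed only where members are used.
    class Widget {
      public:
        void Poke() { std::cout << "poked" << std::endl; }
    };
    void UseWidget(widget_handle *w) { w->Poke(); }

    int main() {
        Widget w;
        UseWidget(&w);
        return 0;
    }
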
// Descriptor Data structures
namespace cvdescriptorset {
@@ -301,7 +302,7 @@
virtual void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) = 0;
virtual void CopyUpdate(const Descriptor *) = 0;
// Create binding between resources of this descriptor and given cb_node
- virtual void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) = 0;
+ virtual void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) = 0;
virtual DescriptorClass GetClass() const { return descriptor_class; };
// Special fast-path check for SamplerDescriptors that are immutable
virtual bool IsImmutableSampler() const { return false; };
@@ -314,16 +315,16 @@
};
// Shared helper functions - These are useful because the shared sampler image descriptor type
// performs common functions with both sampler and image descriptors so they can share their common functions
-bool ValidateSampler(const VkSampler, const core_validation::layer_data *);
-bool ValidateImageUpdate(VkImageView, VkImageLayout, VkDescriptorType, const core_validation::layer_data *, const char *func_name,
- std::string *, std::string *);
+bool ValidateSampler(const VkSampler, layer_data *);
+bool ValidateImageUpdate(VkImageView, VkImageLayout, VkDescriptorType, layer_data *, const char *func_name, std::string *,
+ std::string *);
class SamplerDescriptor : public Descriptor {
public:
SamplerDescriptor(const VkSampler *);
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override;
void CopyUpdate(const Descriptor *) override;
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override;
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override;
virtual bool IsImmutableSampler() const override { return immutable_; };
VkSampler GetSampler() const { return sampler_; }
@@ -338,7 +339,7 @@
ImageSamplerDescriptor(const VkSampler *);
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override;
void CopyUpdate(const Descriptor *) override;
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override;
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override;
virtual bool IsImmutableSampler() const override { return immutable_; };
VkSampler GetSampler() const { return sampler_; }
VkImageView GetImageView() const { return image_view_; }
@@ -356,7 +357,7 @@
ImageDescriptor(const VkDescriptorType);
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override;
void CopyUpdate(const Descriptor *) override;
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override;
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override;
virtual bool IsStorage() const override { return storage_; }
VkImageView GetImageView() const { return image_view_; }
VkImageLayout GetImageLayout() const { return image_layout_; }
@@ -372,7 +373,7 @@
TexelDescriptor(const VkDescriptorType);
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override;
void CopyUpdate(const Descriptor *) override;
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override;
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override;
virtual bool IsStorage() const override { return storage_; }
VkBufferView GetBufferView() const { return buffer_view_; }
@@ -386,7 +387,7 @@
BufferDescriptor(const VkDescriptorType);
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override;
void CopyUpdate(const Descriptor *) override;
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override;
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override;
virtual bool IsDynamic() const override { return dynamic_; }
virtual bool IsStorage() const override { return storage_; }
VkBuffer GetBuffer() const { return buffer_; }
@@ -409,7 +410,7 @@
}
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override { updated = true; }
void CopyUpdate(const Descriptor *) override { updated = true; }
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override {}
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override {}
};
class AccelerationStructureDescriptor : public Descriptor {
@@ -420,7 +421,7 @@
}
void WriteUpdate(const VkWriteDescriptorSet *, const uint32_t) override { updated = true; }
void CopyUpdate(const Descriptor *) override { updated = true; }
- void UpdateDrawState(core_validation::layer_data *, GLOBAL_CB_NODE *) override {}
+ void UpdateDrawState(layer_data *, GLOBAL_CB_NODE *) override {}
};
// Structs to contain common elements that need to be shared between Validate* and Perform* calls below
@@ -431,11 +432,10 @@
};
// Helper functions for descriptor set functions that cross multiple sets
// "Validate" will make sure an update is ok without actually performing it
-bool ValidateUpdateDescriptorSets(const debug_report_data *, const core_validation::layer_data *, uint32_t,
- const VkWriteDescriptorSet *, uint32_t, const VkCopyDescriptorSet *, const char *func_name);
+bool ValidateUpdateDescriptorSets(const debug_report_data *, const layer_data *, uint32_t, const VkWriteDescriptorSet *, uint32_t,
+ const VkCopyDescriptorSet *, const char *func_name);
// "Perform" does the update with the assumption that ValidateUpdateDescriptorSets() has passed for the given update
-void PerformUpdateDescriptorSets(const core_validation::layer_data *, uint32_t, const VkWriteDescriptorSet *, uint32_t,
- const VkCopyDescriptorSet *);
+void PerformUpdateDescriptorSets(layer_data *, uint32_t, const VkWriteDescriptorSet *, uint32_t, const VkCopyDescriptorSet *);
// Helper class to encapsulate the descriptor update template decoding logic
struct DecodedTemplateUpdate {
@@ -444,22 +444,6 @@
DecodedTemplateUpdate(layer_data *device_data, VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state,
const void *pData, VkDescriptorSetLayout push_layout = VK_NULL_HANDLE);
};
-// Helper wrapping ValidateUpdateDescriptorSets, updating via templates
-bool ValidateUpdateDescriptorSetsWithTemplateKHR(layer_data *device_data, VkDescriptorSet descriptorSet,
- const TEMPLATE_STATE *template_state, const void *pData);
-// Helper wrapping PerformUpdateDescriptorSets, updating via templates
-void PerformUpdateDescriptorSetsWithTemplateKHR(layer_data *, VkDescriptorSet, const TEMPLATE_STATE *, const void *);
-// Update the common AllocateDescriptorSetsData struct which can then be shared between Validate* and Perform* funcs below
-void UpdateAllocateDescriptorSetsData(const layer_data *dev_data, const VkDescriptorSetAllocateInfo *,
- AllocateDescriptorSetsData *);
-// Validate that Allocation state is ok
-bool ValidateAllocateDescriptorSets(const core_validation::layer_data *, const VkDescriptorSetAllocateInfo *,
- const AllocateDescriptorSetsData *);
-// Update state based on allocating new descriptorsets
-void PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *, const VkDescriptorSet *, const AllocateDescriptorSetsData *,
- std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> *,
- std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *,
- core_validation::layer_data *);
/*
* DescriptorSet class
@@ -482,7 +466,7 @@
class DescriptorSet : public BASE_NODE {
public:
DescriptorSet(const VkDescriptorSet, const VkDescriptorPool, const std::shared_ptr<DescriptorSetLayout const> &,
- uint32_t variable_count, core_validation::layer_data *);
+ uint32_t variable_count, layer_data *);
~DescriptorSet();
// A number of common Get* functions that return data based on layout from which this set was created
uint32_t GetTotalDescriptorCount() const { return p_layout_->GetTotalDescriptorCount(); };
@@ -582,8 +566,7 @@
DESCRIPTOR_POOL_STATE *pool_state_;
const std::shared_ptr<DescriptorSetLayout const> p_layout_;
std::vector<std::unique_ptr<Descriptor>> descriptors_;
- // Ptr to device data used for various data look-ups
- core_validation::layer_data *const device_data_;
+ layer_data *device_data_;
const VkPhysicalDeviceLimits limits_;
uint32_t variable_count_;
diff --git a/layers/gpu_validation.cpp b/layers/gpu_validation.cpp
index fe6851d..a7065b2 100644
--- a/layers/gpu_validation.cpp
+++ b/layers/gpu_validation.cpp
@@ -20,7 +20,9 @@
// Allow use of STL min and max functions in Windows
#define NOMINMAX
+#include "chassis.h"
#include "core_validation.h"
+#include "gpu_validation.h"
#include "shader_validation.h"
#include "spirv-tools/libspirv.h"
#include "spirv-tools/optimizer.hpp"
@@ -33,6 +35,27 @@
static const uint32_t kNumBindingsInSet = 1;
// Implementation for Device Memory Manager class
+GpuDeviceMemoryManager::GpuDeviceMemoryManager(layer_data *dev_data, uint32_t data_size) {
+ uint32_t align = static_cast<uint32_t>(dev_data->GetPDProperties(dev_data)->limits.minStorageBufferOffsetAlignment);
+ if (0 == align) {
+ align = 1;
+ }
+ record_size_ = data_size;
+ // Round the requested size up to the next multiple of the storage buffer offset alignment
+ // so that we can address each block in the storage buffer using the offset.
+ block_size_ = ((record_size_ + align - 1) / align) * align;
+ blocks_per_chunk_ = kItemsPerChunk;
+ chunk_size_ = blocks_per_chunk_ * block_size_;
+ dev_data_ = dev_data;
+}
+
+GpuDeviceMemoryManager::~GpuDeviceMemoryManager() {
+ for (auto &chunk : chunk_list_) {
+ FreeMemoryChunk(chunk);
+ }
+ chunk_list_.clear();
+}
+
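+
+The constructor added above rounds the per-record size up to the next multiple of minStorageBufferOffsetAlignment so each block in a chunk can be addressed with a legal storage-buffer offset. The expression ((size + align - 1) / align) * align is the standard integer round-up; a small self-contained check of it (the values are illustrative):
+
+    #include <cassert>
+    #include <cstdint>
+
+    // Round 'size' up to the next multiple of 'align' (align must be non-zero).
+    static uint32_t RoundUp(uint32_t size, uint32_t align) { return ((size + align - 1) / align) * align; }
+
+    int main() {
+        assert(RoundUp(24, 16) == 32);  // 24 bytes padded to one 32-byte block
+        assert(RoundUp(32, 16) == 32);  // already aligned, unchanged
+        assert(RoundUp(1, 1) == 1);     // the align==0 -> 1 fallback above keeps this well-defined
+        return 0;
+    }
+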
VkResult GpuDeviceMemoryManager::GetBlock(GpuDeviceMemoryBlock *block) {
assert(block->buffer == VK_NULL_HANDLE); // avoid possible overwrite/leak of an allocated block
VkResult result = VK_SUCCESS;
@@ -101,7 +124,7 @@
bool GpuDeviceMemoryManager::MemoryTypeFromProperties(uint32_t typeBits, VkFlags requirements_mask, uint32_t *typeIndex) {
// Search memtypes to find first index with those properties
- const VkPhysicalDeviceMemoryProperties *props = GetPhysicalDeviceMemoryProperties(dev_data_);
+ const VkPhysicalDeviceMemoryProperties *props = dev_data_->GetPhysicalDeviceMemoryProperties(dev_data_);
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; i++) {
if ((typeBits & 1) == 1) {
// Type is available, does it match user properties?
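
MemoryTypeFromProperties is the standard Vulkan memory-type search: walk the type bits and pick the first memory type whose propertyFlags contain the requested mask. A self-contained sketch of the same search written directly against VkPhysicalDeviceMemoryProperties, independent of the layer's dispatch plumbing (FindMemoryType is an illustrative name):

    #include <vulkan/vulkan.h>
    #include <cstdint>

    // Returns true and writes the first memory type index whose propertyFlags
    // satisfy 'required' and whose bit is set in 'type_bits'.
    static bool FindMemoryType(const VkPhysicalDeviceMemoryProperties &props, uint32_t type_bits,
                               VkMemoryPropertyFlags required, uint32_t *type_index) {
        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            if ((type_bits & (1u << i)) && (props.memoryTypes[i].propertyFlags & required) == required) {
                *type_index = i;
                return true;
            }
        }
        return false;
    }

    int main() {
        VkPhysicalDeviceMemoryProperties props{};
        props.memoryTypeCount = 2;
        props.memoryTypes[1].propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
        uint32_t idx = 0;
        bool ok = FindMemoryType(props, 0x3, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &idx);
        return (ok && idx == 1) ? 0 : 1;
    }
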
@@ -125,17 +148,17 @@
VkResult result = VK_SUCCESS;
bool pass;
void *pData;
- const auto *dispatch_table = GetDispatchTable(dev_data_);
+ const auto *dispatch_table = dev_data_->GetDispatchTable(dev_data_);
buffer_create_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
buffer_create_info.size = chunk_size_;
- result = dispatch_table->CreateBuffer(GetDevice(dev_data_), &buffer_create_info, NULL, &buffer);
+ result = dispatch_table->CreateBuffer(dev_data_->GetDevice(dev_data_), &buffer_create_info, NULL, &buffer);
if (result != VK_SUCCESS) {
return result;
}
- dispatch_table->GetBufferMemoryRequirements(GetDevice(dev_data_), buffer, &mem_reqs);
+ dispatch_table->GetBufferMemoryRequirements(dev_data_->GetDevice(dev_data_), buffer, &mem_reqs);
mem_alloc.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
mem_alloc.pNext = NULL;
@@ -144,29 +167,29 @@
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
&mem_alloc.memoryTypeIndex);
if (!pass) {
- dispatch_table->DestroyBuffer(GetDevice(dev_data_), buffer, NULL);
+ dispatch_table->DestroyBuffer(dev_data_->GetDevice(dev_data_), buffer, NULL);
return result;
}
- result = dispatch_table->AllocateMemory(GetDevice(dev_data_), &mem_alloc, NULL, &memory);
+ result = dispatch_table->AllocateMemory(dev_data_->GetDevice(dev_data_), &mem_alloc, NULL, &memory);
if (result != VK_SUCCESS) {
- dispatch_table->DestroyBuffer(GetDevice(dev_data_), buffer, NULL);
+ dispatch_table->DestroyBuffer(dev_data_->GetDevice(dev_data_), buffer, NULL);
return result;
}
- result = dispatch_table->BindBufferMemory(GetDevice(dev_data_), buffer, memory, 0);
+ result = dispatch_table->BindBufferMemory(dev_data_->GetDevice(dev_data_), buffer, memory, 0);
if (result != VK_SUCCESS) {
- dispatch_table->DestroyBuffer(GetDevice(dev_data_), buffer, NULL);
- dispatch_table->FreeMemory(GetDevice(dev_data_), memory, NULL);
+ dispatch_table->DestroyBuffer(dev_data_->GetDevice(dev_data_), buffer, NULL);
+ dispatch_table->FreeMemory(dev_data_->GetDevice(dev_data_), memory, NULL);
return result;
}
- result = dispatch_table->MapMemory(GetDevice(dev_data_), memory, 0, mem_alloc.allocationSize, 0, &pData);
+ result = dispatch_table->MapMemory(dev_data_->GetDevice(dev_data_), memory, 0, mem_alloc.allocationSize, 0, &pData);
if (result == VK_SUCCESS) {
memset(pData, 0, chunk_size_);
- dispatch_table->UnmapMemory(GetDevice(dev_data_), memory);
+ dispatch_table->UnmapMemory(dev_data_->GetDevice(dev_data_), memory);
} else {
- dispatch_table->DestroyBuffer(GetDevice(dev_data_), buffer, NULL);
- dispatch_table->FreeMemory(GetDevice(dev_data_), memory, NULL);
+ dispatch_table->DestroyBuffer(dev_data_->GetDevice(dev_data_), buffer, NULL);
+ dispatch_table->FreeMemory(dev_data_->GetDevice(dev_data_), memory, NULL);
return result;
}
chunk.buffer = buffer;
@@ -175,8 +198,8 @@
}
void GpuDeviceMemoryManager::FreeMemoryChunk(MemoryChunk &chunk) {
- GetDispatchTable(dev_data_)->DestroyBuffer(GetDevice(dev_data_), chunk.buffer, NULL);
- GetDispatchTable(dev_data_)->FreeMemory(GetDevice(dev_data_), chunk.memory, NULL);
+ dev_data_->GetDispatchTable(dev_data_)->DestroyBuffer(dev_data_->GetDevice(dev_data_), chunk.buffer, NULL);
+ dev_data_->GetDispatchTable(dev_data_)->FreeMemory(dev_data_->GetDevice(dev_data_), chunk.memory, NULL);
}
void GpuDeviceMemoryManager::FreeAllBlocks() {
@@ -187,9 +210,18 @@
}
// Implementation for Descriptor Set Manager class
+GpuDescriptorSetManager::GpuDescriptorSetManager(layer_data *dev_data) { dev_data_ = dev_data; }
+
+GpuDescriptorSetManager::~GpuDescriptorSetManager() {
+ for (auto &pool : desc_pool_map_) {
+ dev_data_->GetDispatchTable(dev_data_)->DestroyDescriptorPool(dev_data_->GetDevice(dev_data_), pool.first, NULL);
+ }
+ desc_pool_map_.clear();
+}
+
VkResult GpuDescriptorSetManager::GetDescriptorSets(uint32_t count, VkDescriptorPool *pool,
std::vector<VkDescriptorSet> *desc_sets) {
- auto gpu_state = GetGpuValidationState(dev_data_);
+ auto gpu_state = dev_data_->GetGpuValidationState(dev_data_);
const uint32_t default_pool_size = kItemsPerChunk;
VkResult result = VK_SUCCESS;
VkDescriptorPool pool_to_use = VK_NULL_HANDLE;
@@ -222,7 +254,8 @@
desc_pool_info.maxSets = pool_count;
desc_pool_info.poolSizeCount = 1;
desc_pool_info.pPoolSizes = &size_counts;
- result = GetDispatchTable(dev_data_)->CreateDescriptorPool(GetDevice(dev_data_), &desc_pool_info, NULL, &pool_to_use);
+ result = dev_data_->GetDispatchTable(dev_data_)->CreateDescriptorPool(dev_data_->GetDevice(dev_data_), &desc_pool_info,
+ NULL, &pool_to_use);
assert(result == VK_SUCCESS);
if (result != VK_SUCCESS) {
return result;
@@ -235,7 +268,8 @@
VkDescriptorSetAllocateInfo alloc_info = {VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, NULL, pool_to_use, count,
desc_layouts.data()};
- result = GetDispatchTable(dev_data_)->AllocateDescriptorSets(GetDevice(dev_data_), &alloc_info, desc_sets->data());
+ result = dev_data_->GetDispatchTable(dev_data_)->AllocateDescriptorSets(dev_data_->GetDevice(dev_data_), &alloc_info,
+ desc_sets->data());
assert(result == VK_SUCCESS);
if (result != VK_SUCCESS) {
return result;
@@ -248,14 +282,15 @@
void GpuDescriptorSetManager::PutBackDescriptorSet(VkDescriptorPool desc_pool, VkDescriptorSet desc_set) {
auto iter = desc_pool_map_.find(desc_pool);
if (iter != desc_pool_map_.end()) {
- VkResult result = GetDispatchTable(dev_data_)->FreeDescriptorSets(GetDevice(dev_data_), desc_pool, 1, &desc_set);
+ VkResult result =
+ dev_data_->GetDispatchTable(dev_data_)->FreeDescriptorSets(dev_data_->GetDevice(dev_data_), desc_pool, 1, &desc_set);
assert(result == VK_SUCCESS);
if (result != VK_SUCCESS) {
return;
}
desc_pool_map_[desc_pool].used--;
if (0 == desc_pool_map_[desc_pool].used) {
- GetDispatchTable(dev_data_)->DestroyDescriptorPool(GetDevice(dev_data_), desc_pool, NULL);
+ dev_data_->GetDispatchTable(dev_data_)->DestroyDescriptorPool(dev_data_->GetDevice(dev_data_), desc_pool, NULL);
desc_pool_map_.erase(desc_pool);
}
}
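
PutBackDescriptorSet returns a set to its pool, decrements the pool's usage count, and destroys the pool once the last set comes back. A compact sketch of just that refcount bookkeeping; PoolTracker is hypothetical and the actual FreeDescriptorSets/DestroyDescriptorPool calls are elided:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct PoolUsage { uint32_t size = 0; uint32_t used = 0; };

    class PoolTracker {
      public:
        // Record that one set from 'pool' was handed out.
        void OnAllocate(uint64_t pool) { pools_[pool].used++; }
        // Return a set; release (here: forget) the pool when it goes idle.
        void OnPutBack(uint64_t pool) {
            auto it = pools_.find(pool);
            if (it == pools_.end()) return;
            if (--it->second.used == 0) pools_.erase(it);  // real code also destroys the descriptor pool here
        }
        std::size_t LivePools() const { return pools_.size(); }

      private:
        std::unordered_map<uint64_t, PoolUsage> pools_;
    };

    int main() {
        PoolTracker t;
        t.OnAllocate(7);
        t.OnAllocate(7);
        t.OnPutBack(7);  // pool still has one outstanding set
        t.OnPutBack(7);  // last set returned, pool is released
        return t.LivePools() == 0 ? 0 : 1;
    }
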
@@ -264,21 +299,21 @@
void GpuDescriptorSetManager::DestroyDescriptorPools() {
for (auto &pool : desc_pool_map_) {
- GetDispatchTable(dev_data_)->DestroyDescriptorPool(GetDevice(dev_data_), pool.first, NULL);
+ dev_data_->GetDispatchTable(dev_data_)->DestroyDescriptorPool(dev_data_->GetDevice(dev_data_), pool.first, NULL);
}
desc_pool_map_.clear();
}
// Convenience function for reporting problems with setting up GPU Validation.
-static void ReportSetupProblem(const layer_data *dev_data, VkDebugReportObjectTypeEXT object_type, uint64_t object_handle,
- const char *const specific_message) {
- log_msg(GetReportData(dev_data), VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle,
- "UNASSIGNED-GPU-Assisted Validation Error. ", "Detail: (%s)", specific_message);
+void CoreChecks::ReportSetupProblem(const layer_data *dev_data, VkDebugReportObjectTypeEXT object_type, uint64_t object_handle,
+ const char *const specific_message) {
+ log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, object_type, object_handle, "UNASSIGNED-GPU-Assisted Validation Error. ",
+ "Detail: (%s)", specific_message);
}
// Turn on necessary device features.
-void GpuPreCallRecordCreateDevice(VkPhysicalDevice gpu, std::unique_ptr<safe_VkDeviceCreateInfo> &create_info,
- VkPhysicalDeviceFeatures *supported_features) {
+void CoreChecks::GpuPreCallRecordCreateDevice(VkPhysicalDevice gpu, std::unique_ptr<safe_VkDeviceCreateInfo> &create_info,
+ VkPhysicalDeviceFeatures *supported_features) {
if (supported_features->fragmentStoresAndAtomics || supported_features->vertexPipelineStoresAndAtomics) {
VkPhysicalDeviceFeatures new_features = {};
if (create_info->pEnabledFeatures) {
@@ -292,7 +327,7 @@
}
// Perform initializations that can be done at Create Device time.
-void GpuPostCallRecordCreateDevice(layer_data *dev_data) {
+void CoreChecks::GpuPostCallRecordCreateDevice(layer_data *dev_data) {
auto gpu_state = GetGpuValidationState(dev_data);
const auto *dispatch_table = GetDispatchTable(dev_data);
@@ -372,7 +407,7 @@
}
// Clean up device-related resources
-void GpuPreCallRecordDestroyDevice(layer_data *dev_data) {
+void CoreChecks::GpuPreCallRecordDestroyDevice(layer_data *dev_data) {
auto gpu_state = GetGpuValidationState(dev_data);
if (gpu_state->barrier_command_buffer) {
@@ -397,10 +432,10 @@
}
// Modify the pipeline layout to include our debug descriptor set and any needed padding with the dummy descriptor set.
-bool GpuPreCallCreatePipelineLayout(layer_data *device_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
- std::vector<VkDescriptorSetLayout> *new_layouts,
- VkPipelineLayoutCreateInfo *modified_create_info) {
+bool CoreChecks::GpuPreCallCreatePipelineLayout(layer_data *device_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
+ std::vector<VkDescriptorSetLayout> *new_layouts,
+ VkPipelineLayoutCreateInfo *modified_create_info) {
auto gpu_state = GetGpuValidationState(device_data);
if (gpu_state->aborted) {
return false;
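
GpuPreCallCreatePipelineLayout grows the application's pipeline layout so the instrumentation's descriptor set can occupy a reserved binding slot: copy the app's set layouts, pad any gap with a dummy layout, and place the debug layout at the reserved index. A schematic sketch of that list surgery; PadSetLayouts, dummy_layout, debug_layout and the reserved index are stand-ins, and in the real code the reserved slot is derived from the device's maxBoundDescriptorSets limit:

    #include <vulkan/vulkan.h>
    #include <vector>

    // Build the padded set-layout list: app layouts first, dummies up to the
    // reserved slot, then the debug layout at 'reserved_index'.
    static std::vector<VkDescriptorSetLayout> PadSetLayouts(const VkPipelineLayoutCreateInfo &ci,
                                                            VkDescriptorSetLayout dummy_layout,
                                                            VkDescriptorSetLayout debug_layout,
                                                            uint32_t reserved_index) {
        std::vector<VkDescriptorSetLayout> layouts;
        layouts.reserve(reserved_index + 1);
        for (uint32_t i = 0; i < ci.setLayoutCount; ++i) layouts.push_back(ci.pSetLayouts[i]);
        while (layouts.size() < reserved_index) layouts.push_back(dummy_layout);  // pad the gap
        layouts.push_back(debug_layout);                                          // reserved slot
        return layouts;
    }

    int main() {
        VkDescriptorSetLayout app_layouts[1] = {VK_NULL_HANDLE};
        VkPipelineLayoutCreateInfo ci = {};
        ci.setLayoutCount = 1;
        ci.pSetLayouts = app_layouts;
        auto padded = PadSetLayouts(ci, VK_NULL_HANDLE, VK_NULL_HANDLE, 7);
        return padded.size() == 8 ? 0 : 1;
    }

The modified create info then points pSetLayouts at this padded vector and bumps setLayoutCount before the call continues down the chain.
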
@@ -433,7 +468,7 @@
}
// Clean up GPU validation after the CreatePipelineLayout call is made
-void GpuPostCallCreatePipelineLayout(layer_data *device_data, VkResult result) {
+void CoreChecks::GpuPostCallCreatePipelineLayout(layer_data *device_data, VkResult result) {
auto gpu_state = GetGpuValidationState(device_data);
// Clean up GPU validation
if (result != VK_SUCCESS) {
@@ -444,7 +479,8 @@
}
// Free the device memory and descriptor set associated with a command buffer.
-void GpuPreCallRecordFreeCommandBuffers(layer_data *dev_data, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
+void CoreChecks::GpuPreCallRecordFreeCommandBuffers(layer_data *dev_data, uint32_t commandBufferCount,
+ const VkCommandBuffer *pCommandBuffers) {
auto gpu_state = GetGpuValidationState(dev_data);
if (gpu_state->aborted) {
return;
@@ -467,7 +503,7 @@
}
// Just gives a warning about a possible deadlock.
-void GpuPreCallValidateCmdWaitEvents(layer_data *dev_data, VkPipelineStageFlags sourceStageMask) {
+void CoreChecks::GpuPreCallValidateCmdWaitEvents(layer_data *dev_data, VkPipelineStageFlags sourceStageMask) {
if (sourceStageMask & VK_PIPELINE_STAGE_HOST_BIT) {
ReportSetupProblem(dev_data, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, HandleToUint64(GetDevice(dev_data)),
"CmdWaitEvents recorded with VK_PIPELINE_STAGE_HOST_BIT set. "
@@ -479,7 +515,7 @@
// Examine the pipelines to see if they use the debug descriptor set binding index.
// If any do, create new non-instrumented shader modules and use them to replace the instrumented
// shaders in the pipeline. Return the (possibly) modified create infos to the caller.
-std::vector<safe_VkGraphicsPipelineCreateInfo> GpuPreCallRecordCreateGraphicsPipelines(
+std::vector<safe_VkGraphicsPipelineCreateInfo> CoreChecks::GpuPreCallRecordCreateGraphicsPipelines(
layer_data *dev_data, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos,
const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, std::vector<std::unique_ptr<PIPELINE_STATE>> &pipe_state) {
auto gpu_state = GetGpuValidationState(dev_data);
@@ -535,9 +571,9 @@
// - Destroy it since it has been bound into the pipeline by now. This is our only chance to delete it.
// - Track the shader in the shader_map
// - Save the shader binary if it contains debug code
-void GpuPostCallRecordCreateGraphicsPipelines(layer_data *dev_data, const uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
+void CoreChecks::GpuPostCallRecordCreateGraphicsPipelines(layer_data *dev_data, const uint32_t count,
+ const VkGraphicsPipelineCreateInfo *pCreateInfos,
+ const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
auto gpu_state = GetGpuValidationState(dev_data);
for (uint32_t pipeline = 0; pipeline < count; ++pipeline) {
auto pipeline_state = GetPipelineState(dev_data, pPipelines[pipeline]);
@@ -572,7 +608,7 @@
}
// Remove all the shader trackers associated with this destroyed pipeline.
-void GpuPreCallRecordDestroyPipeline(layer_data *dev_data, const VkPipeline pipeline) {
+void CoreChecks::GpuPreCallRecordDestroyPipeline(layer_data *dev_data, const VkPipeline pipeline) {
auto gpu_state = GetGpuValidationState(dev_data);
for (auto it = gpu_state->shader_map.begin(); it != gpu_state->shader_map.end();) {
if (it->second.pipeline == pipeline) {
@@ -584,8 +620,8 @@
}
// Call the SPIR-V Optimizer to run the instrumentation pass on the shader.
-static bool GpuInstrumentShader(layer_data *dev_data, const VkShaderModuleCreateInfo *pCreateInfo,
- std::vector<unsigned int> &new_pgm, uint32_t *unique_shader_id) {
+bool CoreChecks::GpuInstrumentShader(layer_data *dev_data, const VkShaderModuleCreateInfo *pCreateInfo,
+ std::vector<unsigned int> &new_pgm, uint32_t *unique_shader_id) {
auto gpu_state = GetGpuValidationState(dev_data);
if (gpu_state->aborted) return false;
if (pCreateInfo->pCode[0] != spv::MagicNumber) return false;
@@ -613,10 +649,10 @@
}
// Create the instrumented shader data to provide to the driver.
-bool GpuPreCallCreateShaderModule(layer_data *dev_data, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule,
- uint32_t *unique_shader_id, VkShaderModuleCreateInfo *instrumented_create_info,
- std::vector<unsigned int> *instrumented_pgm) {
+bool CoreChecks::GpuPreCallCreateShaderModule(layer_data *dev_data, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule,
+ uint32_t *unique_shader_id, VkShaderModuleCreateInfo *instrumented_create_info,
+ std::vector<unsigned int> *instrumented_pgm) {
bool pass = GpuInstrumentShader(dev_data, pCreateInfo, *instrumented_pgm, unique_shader_id);
if (pass) {
instrumented_create_info->pCode = instrumented_pgm->data();
@@ -683,8 +719,7 @@
msg = strm.str();
}
-static std::string LookupDebugUtilsName(const layer_data *device_data, const uint64_t object) {
- debug_report_data *report_data = device_data->report_data;
+static std::string LookupDebugUtilsName(const debug_report_data *report_data, const uint64_t object) {
auto object_label = report_data->DebugReportGetUtilsObjectName(object);
if (object_label != "") {
object_label = "(" + object_label + ")";
@@ -693,24 +728,24 @@
}
// Generate message from the common portion of the debug report record.
-static void GenerateCommonMessage(const layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t *debug_record,
+static void GenerateCommonMessage(const debug_report_data *report_data, const GLOBAL_CB_NODE *cb_node, const uint32_t *debug_record,
const VkShaderModule shader_module_handle, const VkPipeline pipeline_handle,
const uint32_t draw_index, std::string &msg) {
using namespace spvtools;
std::ostringstream strm;
if (shader_module_handle == VK_NULL_HANDLE) {
strm << std::hex << std::showbase << "Internal Error: Unable to locate information for shader used in command buffer "
- << LookupDebugUtilsName(dev_data, HandleToUint64(cb_node->commandBuffer)) << "("
+ << LookupDebugUtilsName(report_data, HandleToUint64(cb_node->commandBuffer)) << "("
<< HandleToUint64(cb_node->commandBuffer) << "). ";
assert(true);
} else {
strm << std::hex << std::showbase << "Command buffer "
- << LookupDebugUtilsName(dev_data, HandleToUint64(cb_node->commandBuffer)) << "("
+ << LookupDebugUtilsName(report_data, HandleToUint64(cb_node->commandBuffer)) << "("
<< HandleToUint64(cb_node->commandBuffer) << "). "
<< "Draw Index " << draw_index << ". "
- << "Pipeline " << LookupDebugUtilsName(dev_data, HandleToUint64(pipeline_handle)) << "("
+ << "Pipeline " << LookupDebugUtilsName(report_data, HandleToUint64(pipeline_handle)) << "("
<< HandleToUint64(pipeline_handle) << "). "
- << "Shader Module " << LookupDebugUtilsName(dev_data, HandleToUint64(shader_module_handle)) << "("
+ << "Shader Module " << LookupDebugUtilsName(report_data, HandleToUint64(shader_module_handle)) << "("
<< HandleToUint64(shader_module_handle) << "). ";
}
strm << std::dec << std::noshowbase;
@@ -933,8 +968,8 @@
// sure it is available when the pipeline is submitted. (The ShaderModule tracking object also
// keeps a copy, but it can be destroyed after the pipeline is created and before it is submitted.)
//
-static void AnalyzeAndReportError(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkQueue queue, uint32_t draw_index,
- uint32_t *const debug_output_buffer) {
+void CoreChecks::AnalyzeAndReportError(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkQueue queue, uint32_t draw_index,
+ uint32_t *const debug_output_buffer) {
using namespace spvtools;
const uint32_t total_words = debug_output_buffer[0];
// A zero here means that the shader instrumentation didn't write anything.
@@ -973,7 +1008,7 @@
}
GenerateValidationMessage(debug_record, validation_message, vuid_msg);
GenerateStageMessage(debug_record, stage_message);
- GenerateCommonMessage(dev_data, cb_node, debug_record, shader_module_handle, pipeline_handle, draw_index, common_message);
+ GenerateCommonMessage(report_data, cb_node, debug_record, shader_module_handle, pipeline_handle, draw_index, common_message);
GenerateSourceMessages(pgm, debug_record, filename_message, source_message);
log_msg(GetReportData(dev_data), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, HandleToUint64(queue),
vuid_msg.c_str(), "%s %s %s %s%s", validation_message.c_str(), common_message.c_str(), stage_message.c_str(),
@@ -985,7 +1020,7 @@
}
// For the given command buffer, map its debug data buffers and read their contents for analysis.
-static void ProcessInstrumentationBuffer(const layer_data *dev_data, VkQueue queue, GLOBAL_CB_NODE *cb_node) {
+void CoreChecks::ProcessInstrumentationBuffer(const layer_data *dev_data, VkQueue queue, GLOBAL_CB_NODE *cb_node) {
auto gpu_state = GetGpuValidationState(dev_data);
if (cb_node && cb_node->hasDrawCmd && cb_node->gpu_buffer_list.size() > 0) {
VkResult result;
@@ -1015,7 +1050,7 @@
// Submit a memory barrier on graphics queues.
// Lazy-create and record the needed command buffer.
-static void SubmitBarrier(layer_data *dev_data, VkQueue queue) {
+void CoreChecks::SubmitBarrier(layer_data *dev_data, VkQueue queue) {
auto gpu_state = GetGpuValidationState(dev_data);
const auto *dispatch_table = GetDispatchTable(dev_data);
uint32_t queue_family_index = 0;
@@ -1027,7 +1062,7 @@
// Pay attention only to queues that support graphics.
// This ensures that the command buffer pool is created so that it can be used on a graphics queue.
- VkQueueFlags queue_flags = GetPhysicalDeviceState(dev_data)->queue_family_properties[queue_family_index].queueFlags;
+ VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (!(queue_flags & VK_QUEUE_GRAPHICS_BIT)) {
return;
}
@@ -1095,14 +1130,14 @@
// Issue a memory barrier to make GPU-written data available to host.
// Wait for the queue to complete execution.
// Check the debug buffers for all the command buffers that were submitted.
-void GpuPostCallQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
- VkFence fence) {
+void CoreChecks::GpuPostCallQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits,
+ VkFence fence) {
auto gpu_state = GetGpuValidationState(dev_data);
if (gpu_state->aborted) return;
SubmitBarrier(dev_data, queue);
- dev_data->dispatch_table.QueueWaitIdle(queue);
+ dev_data->device_dispatch_table.QueueWaitIdle(queue);
for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
const VkSubmitInfo *submit = &pSubmits[submit_idx];
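// Hedged sketch of the remainder of the submit loop that the hunk elides: each submitted
// command buffer is looked up and handed to ProcessInstrumentationBuffer. GetCBNode is the
// existing core_validation lookup; secondary command buffers are also walked in the real code.
for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
    auto cb_node = GetCBNode(dev_data, submit->pCommandBuffers[i]);
    ProcessInstrumentationBuffer(dev_data, queue, cb_node);
}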
@@ -1116,7 +1151,8 @@
}
}
-void GpuAllocateValidationResources(layer_data *dev_data, const VkCommandBuffer cmd_buffer, const VkPipelineBindPoint bind_point) {
+void CoreChecks::GpuAllocateValidationResources(layer_data *dev_data, const VkCommandBuffer cmd_buffer,
+ const VkPipelineBindPoint bind_point) {
VkResult result;
if (!(GetEnables(dev_data)->gpu_validation)) return;
diff --git a/layers/gpu_validation.h b/layers/gpu_validation.h
index 3dad9e5..ea17c3a 100644
--- a/layers/gpu_validation.h
+++ b/layers/gpu_validation.h
@@ -27,21 +27,13 @@
// The interface allows the caller to "get" and "put back" blocks.
// The manager allocates and frees chunks as needed.
+class CoreChecks;
+typedef CoreChecks layer_data;
+
class GpuDeviceMemoryManager {
public:
- GpuDeviceMemoryManager(layer_data *dev_data, uint32_t data_size) {
- uint32_t align = static_cast<uint32_t>(GetPDProperties(dev_data)->limits.minStorageBufferOffsetAlignment);
- if (0 == align) {
- align = 1;
- }
- record_size_ = data_size;
- // Round the requested size up to the next multiple of the storage buffer offset alignment
- // so that we can address each block in the storage buffer using the offset.
- block_size_ = ((record_size_ + align - 1) / align) * align;
- blocks_per_chunk_ = kItemsPerChunk;
- chunk_size_ = blocks_per_chunk_ * block_size_;
- dev_data_ = dev_data;
- }
+ GpuDeviceMemoryManager(layer_data *dev_data, uint32_t data_size);
+ ~GpuDeviceMemoryManager();
uint32_t GetBlockSize() { return block_size_; }
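// A minimal sketch of the now out-of-line constructor (the definition presumably moves to
// gpu_validation.cpp); it preserves the round-up-to-alignment logic from the removed inline
// body so that each block can be addressed with a storage-buffer offset. It assumes the
// GetPDProperties accessor is still reachable from that file; after the chassis conversion
// the call may instead take the form dev_data->GetPDProperties().
GpuDeviceMemoryManager::GpuDeviceMemoryManager(layer_data *dev_data, uint32_t data_size) {
    uint32_t align = static_cast<uint32_t>(GetPDProperties(dev_data)->limits.minStorageBufferOffsetAlignment);
    if (0 == align) {
        align = 1;
    }
    record_size_ = data_size;
    // Round the requested size up to the next multiple of minStorageBufferOffsetAlignment.
    block_size_ = ((record_size_ + align - 1) / align) * align;
    blocks_per_chunk_ = kItemsPerChunk;
    chunk_size_ = blocks_per_chunk_ * block_size_;
    dev_data_ = dev_data;
}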
@@ -79,7 +71,8 @@
// as needed to satisfy requests for descriptor sets.
class GpuDescriptorSetManager {
public:
- GpuDescriptorSetManager(layer_data *dev_data) { dev_data_ = dev_data; }
+ GpuDescriptorSetManager(layer_data *dev_data);
+ ~GpuDescriptorSetManager();
VkResult GetDescriptorSets(uint32_t count, VkDescriptorPool *pool, std::vector<VkDescriptorSet> *desc_sets);
void PutBackDescriptorSet(VkDescriptorPool desc_pool, VkDescriptorSet desc_set);
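// Hedged usage sketch of the get/put-back interface declared above; desc_set_manager stands
// in for whatever GpuDescriptorSetManager instance the caller holds, and error handling plus
// the descriptor-set layout plumbing are elided.
#include <vector>
VkDescriptorPool pool = VK_NULL_HANDLE;
std::vector<VkDescriptorSet> desc_sets;
if (VK_SUCCESS == desc_set_manager->GetDescriptorSets(1, &pool, &desc_sets)) {
    // ... bind desc_sets[0] for the instrumented draw ...
    desc_set_manager->PutBackDescriptorSet(pool, desc_sets[0]);
}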
@@ -100,29 +93,4 @@
using lock_guard_t = std::lock_guard<mutex_t>;
using unique_lock_t = std::unique_lock<mutex_t>;
-void GpuPreCallRecordCreateDevice(VkPhysicalDevice gpu, std::unique_ptr<safe_VkDeviceCreateInfo> &modified_create_info,
- VkPhysicalDeviceFeatures *supported_features);
-void GpuPostCallRecordCreateDevice(layer_data *dev_data);
-void GpuPreCallRecordDestroyDevice(layer_data *dev_data);
-void GpuPreCallRecordFreeCommandBuffers(layer_data *dev_data, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers);
-bool GpuPreCallCreateShaderModule(layer_data *dev_data, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule,
- uint32_t *unique_shader_id, VkShaderModuleCreateInfo *instrumented_create_info,
- std::vector<unsigned int> *instrumented_pgm);
-bool GpuPreCallCreatePipelineLayout(layer_data *device_data, const VkPipelineLayoutCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout,
- std::vector<VkDescriptorSetLayout> *new_layouts,
- VkPipelineLayoutCreateInfo *modified_create_info);
-void GpuPostCallCreatePipelineLayout(layer_data *device_data, VkResult result);
-void GpuPostCallQueueSubmit(layer_data *dev_data, VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence);
-void GpuPreCallValidateCmdWaitEvents(layer_data *dev_data, VkPipelineStageFlags sourceStageMask);
-std::vector<safe_VkGraphicsPipelineCreateInfo> GpuPreCallRecordCreateGraphicsPipelines(
- layer_data *dev_data, VkPipelineCache pipelineCache, uint32_t count, const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines, std::vector<std::unique_ptr<PIPELINE_STATE>> &pipe_state);
-void GpuPostCallRecordCreateGraphicsPipelines(layer_data *dev_data, const uint32_t count,
- const VkGraphicsPipelineCreateInfo *pCreateInfos,
- const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines);
-void GpuPreCallRecordDestroyPipeline(layer_data *dev_data, const VkPipeline pipeline);
-void GpuAllocateValidationResources(layer_data *dev_data, const VkCommandBuffer cmd_buffer, VkPipelineBindPoint bind_point);
-
#endif // VULKAN_GPU_VALIDATION_H
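// The forward declaration and typedef added above let GPU-AV keep its layer_data-based
// signatures while the entry points become CoreChecks methods. A hedged before/after sketch
// of the conversion pattern applied throughout this change (SubmitBarrier is one example
// from the .cpp hunks earlier in the diff):
//
//   static void SubmitBarrier(layer_data *dev_data, VkQueue queue);        // before: free function
//   void CoreChecks::SubmitBarrier(layer_data *dev_data, VkQueue queue);   // after: member; layer_data
//                                                                          // now aliases CoreChecks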
diff --git a/layers/shader_validation.cpp b/layers/shader_validation.cpp
index e945bf0..a425f66 100644
--- a/layers/shader_validation.cpp
+++ b/layers/shader_validation.cpp
@@ -32,19 +32,12 @@
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
+#include "chassis.h"
#include "core_validation.h"
#include "shader_validation.h"
#include "spirv-tools/libspirv.h"
#include "xxhash.h"
-namespace core_validation {
-extern unordered_map<void *, layer_data *> layer_data_map;
-extern unordered_map<void *, instance_layer_data *> instance_layer_data_map;
-}; // namespace core_validation
-
-using core_validation::instance_layer_data_map;
-using core_validation::layer_data_map;
-
enum FORMAT_TYPE {
FORMAT_TYPE_FLOAT = 1, // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
FORMAT_TYPE_SINT = 2,
@@ -1426,8 +1419,8 @@
return false;
}
-static bool ValidateShaderCapabilities(layer_data *dev_data, shader_module const *src, VkShaderStageFlagBits stage,
- bool has_writable_descriptor) {
+bool CoreChecks::ValidateShaderCapabilities(layer_data *dev_data, shader_module const *src, VkShaderStageFlagBits stage,
+ bool has_writable_descriptor) {
bool skip = false;
auto report_data = GetReportData(dev_data);
@@ -1654,8 +1647,8 @@
return false;
}
-static bool ValidateShaderStageInputOutputLimits(layer_data *dev_data, shader_module const *src,
- VkPipelineShaderStageCreateInfo const *pStage, PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateShaderStageInputOutputLimits(layer_data *dev_data, shader_module const *src,
+ VkPipelineShaderStageCreateInfo const *pStage, PIPELINE_STATE *pipeline) {
if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT || pStage->stage == VK_SHADER_STAGE_ALL_GRAPHICS ||
pStage->stage == VK_SHADER_STAGE_ALL) {
return false;
@@ -1837,7 +1830,7 @@
return skip;
}
-static uint32_t DescriptorTypeToReqs(shader_module const *module, uint32_t type_id) {
+uint32_t DescriptorTypeToReqs(shader_module const *module, uint32_t type_id) {
auto type = module->get_def(type_id);
while (true) {
@@ -1948,8 +1941,8 @@
// * gl_PointSize must be written in the final geometry stage
// - If shaderTessellationAndGeometryPointSize feature is disabled:
// * gl_PointSize must NOT be written and a default of 1.0 is assumed
-bool ValidatePointListShaderState(const layer_data *dev_data, const PIPELINE_STATE *pipeline, shader_module const *src,
- spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) {
+bool CoreChecks::ValidatePointListShaderState(const layer_data *dev_data, const PIPELINE_STATE *pipeline, shader_module const *src,
+ spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) {
if (pipeline->topology_at_rasterizer != VK_PRIMITIVE_TOPOLOGY_POINT_LIST) {
return false;
}
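// Hedged sketch of the decision spelled out in the comment block above. PointSizeWritten,
// EnabledTessGeomPointSize, and ReportPointSizeError are hypothetical helpers; the real code
// walks the SPIR-V entrypoint and emits log_msg errors with the appropriate VUIDs.
if (pipeline->topology_at_rasterizer == VK_PRIMITIVE_TOPOLOGY_POINT_LIST) {
    const bool written = PointSizeWritten(src, entrypoint);            // hypothetical SPIR-V walk
    const bool feature = EnabledTessGeomPointSize(dev_data);           // hypothetical feature accessor
    if (feature && !written) ReportPointSizeError("gl_PointSize must be written in the final pre-raster stage");
    if (!feature && written) ReportPointSizeError("gl_PointSize must not be written; a default of 1.0 is assumed");
}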
@@ -1996,9 +1989,9 @@
return skip;
}
-static bool ValidatePipelineShaderStage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
- PIPELINE_STATE *pipeline, shader_module const **out_module, spirv_inst_iter *out_entrypoint,
- bool check_point_size) {
+bool CoreChecks::ValidatePipelineShaderStage(layer_data *dev_data, VkPipelineShaderStageCreateInfo const *pStage,
+ PIPELINE_STATE *pipeline, shader_module const **out_module,
+ spirv_inst_iter *out_entrypoint, bool check_point_size) {
bool skip = false;
auto module = *out_module = GetShaderModuleState(dev_data, pStage->module);
auto report_data = GetReportData(dev_data);
@@ -2187,7 +2180,7 @@
// Validate the shaders used by the given pipeline and store the active_slots
// that are actually used by the pipeline into pPipeline->active_slots
-bool ValidateAndCapturePipelineShaderState(layer_data *dev_data, PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateAndCapturePipelineShaderState(layer_data *dev_data, PIPELINE_STATE *pipeline) {
auto pCreateInfo = pipeline->graphicsPipelineCI.ptr();
int vertex_stage = GetShaderStageId(VK_SHADER_STAGE_VERTEX_BIT);
int fragment_stage = GetShaderStageId(VK_SHADER_STAGE_FRAGMENT_BIT);
@@ -2250,7 +2243,7 @@
return skip;
}
-bool ValidateComputePipeline(layer_data *dev_data, PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateComputePipeline(layer_data *dev_data, PIPELINE_STATE *pipeline) {
auto pCreateInfo = pipeline->computePipelineCI.ptr();
shader_module const *module;
@@ -2259,7 +2252,7 @@
return ValidatePipelineShaderStage(dev_data, &pCreateInfo->stage, pipeline, &module, &entrypoint, false);
}
-bool ValidateRayTracingPipelineNV(layer_data *dev_data, PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateRayTracingPipelineNV(layer_data *dev_data, PIPELINE_STATE *pipeline) {
auto pCreateInfo = pipeline->raytracingPipelineCI.ptr();
shader_module const *module;
@@ -2279,8 +2272,8 @@
return nullptr;
}
-bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
+bool CoreChecks::PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
bool skip = false;
@@ -2343,9 +2336,11 @@
return skip;
}
-void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule, void *csm_state_data) {
- layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), core_validation::layer_data_map);
+void CoreChecks::PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule,
+ void *csm_state_data) {
+ layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
+
create_shader_module_api_state *csm_state = reinterpret_cast<create_shader_module_api_state *>(csm_state_data);
if (GetEnables(device_data)->gpu_validation) {
GpuPreCallCreateShaderModule(device_data, pCreateInfo, pAllocator, pShaderModule, &csm_state->unique_shader_id,
@@ -2353,10 +2348,11 @@
}
}
-void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule, VkResult result,
- void *csm_state_data) {
- layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), core_validation::layer_data_map);
+void CoreChecks::PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
+ const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule,
+ VkResult result, void *csm_state_data) {
+ layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
+
if (VK_SUCCESS != result) return;
create_shader_module_api_state *csm_state = reinterpret_cast<create_shader_module_api_state *>(csm_state_data);
diff --git a/layers/shader_validation.h b/layers/shader_validation.h
index 54bd0d1..230adc1 100644
--- a/layers/shader_validation.h
+++ b/layers/shader_validation.h
@@ -206,16 +206,6 @@
}
};
-bool ValidateAndCapturePipelineShaderState(layer_data *dev_data, PIPELINE_STATE *pPipeline);
-bool ValidateComputePipeline(layer_data *dev_data, PIPELINE_STATE *pPipeline);
-bool ValidateRayTracingPipelineNV(layer_data *dev_data, PIPELINE_STATE *pipeline);
typedef std::pair<unsigned, unsigned> descriptor_slot_t;
-bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule);
-void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule, void *csm_state);
-void PostCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule, VkResult result,
- void *csm_state);
#endif // VULKAN_SHADER_VALIDATION_H