layers: Const qualify pipeline validation paths
To provide additional assurance of the disentanglement of state and
validation operations, add const restrictions to the various validation
functions stemming from the Create*Pipeline entrypoints.
Change-Id: I60b587655ec86fd3725a86c967f1102327868d34
diff --git a/layers/core_validation.cpp b/layers/core_validation.cpp
index 837b253..f29f0d7 100644
--- a/layers/core_validation.cpp
+++ b/layers/core_validation.cpp
@@ -1152,7 +1152,7 @@
}
}
-bool CoreChecks::ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) {
+bool CoreChecks::ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pPipelines, int pipelineIndex) const {
bool skip = false;
const PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex].get();
@@ -1161,7 +1161,7 @@
// pipeline correctly, and that the base pipeline was created to allow
// derivatives.
if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
- PIPELINE_STATE *pBasePipeline = nullptr;
+ const PIPELINE_STATE *pBasePipeline = nullptr;
if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
(pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
// This check is a superset of "VUID-VkGraphicsPipelineCreateInfo-flags-00724" and
@@ -1192,7 +1192,7 @@
}
// UNLOCKED pipeline validation. DO NOT lookup objects in the CoreChecks->* maps in this function.
-bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) {
+bool CoreChecks::ValidatePipelineUnlocked(const PIPELINE_STATE *pPipeline, uint32_t pipelineIndex) const {
bool skip = false;
// Ensure the subpass index is valid. If not, then ValidateGraphicsPipelineShaderState
@@ -4821,7 +4821,7 @@
}
bool CoreChecks::ValidatePipelineVertexDivisors(std::vector<std::unique_ptr<PIPELINE_STATE>> const &pipe_state_vec,
- const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) {
+ const uint32_t count, const VkGraphicsPipelineCreateInfo *pipe_cis) const {
bool skip = false;
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
diff --git a/layers/core_validation.h b/layers/core_validation.h
index 3daba00..d5b1728 100644
--- a/layers/core_validation.h
+++ b/layers/core_validation.h
@@ -496,8 +496,8 @@
void StoreMemRanges(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size);
bool ValidateIdleDescriptorSet(VkDescriptorSet set, const char* func_str);
void InitializeAndTrackMemory(VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, void** ppData);
- bool ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const& pPipelines, int pipelineIndex);
- bool ValidatePipelineUnlocked(const PIPELINE_STATE* pPipeline, uint32_t pipelineIndex);
+ bool ValidatePipelineLocked(std::vector<std::unique_ptr<PIPELINE_STATE>> const& pPipelines, int pipelineIndex) const;
+ bool ValidatePipelineUnlocked(const PIPELINE_STATE* pPipeline, uint32_t pipelineIndex) const;
bool ValidImageBufferQueue(CMD_BUFFER_STATE* cb_node, const VulkanTypedHandle& object, VkQueue queue, uint32_t count,
const uint32_t* indices);
bool ValidateFenceForSubmit(FENCE_STATE* pFence);
@@ -531,7 +531,7 @@
const VkDeviceQueueCreateInfo* infos);
bool ValidatePipelineVertexDivisors(std::vector<std::unique_ptr<PIPELINE_STATE>> const& pipe_state_vec, const uint32_t count,
- const VkGraphicsPipelineCreateInfo* pipe_cis);
+ const VkGraphicsPipelineCreateInfo* pipe_cis) const;
void AddFramebufferBinding(CMD_BUFFER_STATE* cb_state, FRAMEBUFFER_STATE* fb_state);
bool ValidateImageBarrierImage(const char* funcName, CMD_BUFFER_STATE const* cb_state, VkFramebuffer framebuffer,
uint32_t active_subpass, const safe_VkSubpassDescription2KHR& sub_desc,
@@ -796,27 +796,27 @@
const VkCopyDescriptorSet* p_cds, const char* func_name);
// Stuff from shader_validation
- bool ValidateGraphicsPipelineShaderState(const PIPELINE_STATE* pPipeline);
- bool ValidateComputePipeline(PIPELINE_STATE* pPipeline);
- bool ValidateRayTracingPipelineNV(PIPELINE_STATE* pipeline);
+ bool ValidateGraphicsPipelineShaderState(const PIPELINE_STATE* pPipeline) const;
+ bool ValidateComputePipeline(PIPELINE_STATE* pPipeline) const;
+ bool ValidateRayTracingPipelineNV(PIPELINE_STATE* pipeline) const;
bool PreCallValidateCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
void PreCallRecordCreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule, void* csm_state);
bool ValidatePipelineShaderStage(VkPipelineShaderStageCreateInfo const* pStage, const PIPELINE_STATE* pipeline,
const PIPELINE_STATE::StageState& stage_state, const SHADER_MODULE_STATE* module,
- const spirv_inst_iter& entrypoint, bool check_point_size);
+ const spirv_inst_iter& entrypoint, bool check_point_size) const;
bool ValidatePointListShaderState(const PIPELINE_STATE* pipeline, SHADER_MODULE_STATE const* src, spirv_inst_iter entrypoint,
- VkShaderStageFlagBits stage);
- bool ValidateShaderCapabilities(SHADER_MODULE_STATE const* src, VkShaderStageFlagBits stage);
- bool ValidateShaderStageWritableDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor);
+ VkShaderStageFlagBits stage) const;
+ bool ValidateShaderCapabilities(SHADER_MODULE_STATE const* src, VkShaderStageFlagBits stage) const;
+ bool ValidateShaderStageWritableDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor) const;
bool ValidateShaderStageInputOutputLimits(SHADER_MODULE_STATE const* src, VkPipelineShaderStageCreateInfo const* pStage,
- const PIPELINE_STATE* pipeline, spirv_inst_iter entrypoint);
+ const PIPELINE_STATE* pipeline, spirv_inst_iter entrypoint) const;
bool ValidateShaderStageGroupNonUniform(SHADER_MODULE_STATE const* src, VkShaderStageFlagBits stage,
- std::unordered_set<uint32_t> const& accessible_ids);
+ std::unordered_set<uint32_t> const& accessible_ids) const;
bool ValidateCooperativeMatrix(SHADER_MODULE_STATE const* src, VkPipelineShaderStageCreateInfo const* pStage,
- const PIPELINE_STATE* pipeline);
- bool ValidateExecutionModes(SHADER_MODULE_STATE const* src, spirv_inst_iter entrypoint);
+ const PIPELINE_STATE* pipeline) const;
+ bool ValidateExecutionModes(SHADER_MODULE_STATE const* src, spirv_inst_iter entrypoint) const;
// Gpu Validation Functions
void GpuPreCallRecordCreateDevice(VkPhysicalDevice gpu, std::unique_ptr<safe_VkDeviceCreateInfo>& modified_create_info,
@@ -1734,7 +1734,7 @@
const VkAllocationCallbacks* pAllocator);
bool PreCallValidateGetBufferDeviceAddressEXT(VkDevice device, const VkBufferDeviceAddressInfoEXT* pInfo);
bool PreCallValidateCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask);
- bool ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE* shader);
+ bool ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE* shader) const;
bool ValidateQueryRange(VkDevice device, VkQueryPool queryPool, uint32_t totalCount, uint32_t firstQuery, uint32_t queryCount,
const char* vuid_badfirst, const char* vuid_badrange);
diff --git a/layers/shader_validation.cpp b/layers/shader_validation.cpp
index edd5182..9461b19 100644
--- a/layers/shader_validation.cpp
+++ b/layers/shader_validation.cpp
@@ -1518,7 +1518,7 @@
return false;
}
-bool CoreChecks::ValidateShaderCapabilities(SHADER_MODULE_STATE const *src, VkShaderStageFlagBits stage) {
+bool CoreChecks::ValidateShaderCapabilities(SHADER_MODULE_STATE const *src, VkShaderStageFlagBits stage) const {
bool skip = false;
struct FeaturePointer {
@@ -1791,7 +1791,7 @@
return skip;
}
-bool CoreChecks::ValidateShaderStageWritableDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor) {
+bool CoreChecks::ValidateShaderStageWritableDescriptor(VkShaderStageFlagBits stage, bool has_writable_descriptor) const {
bool skip = false;
if (has_writable_descriptor) {
@@ -1822,7 +1822,7 @@
}
bool CoreChecks::ValidateShaderStageGroupNonUniform(SHADER_MODULE_STATE const *module, VkShaderStageFlagBits stage,
- std::unordered_set<uint32_t> const &accessible_ids) {
+ std::unordered_set<uint32_t> const &accessible_ids) const {
bool skip = false;
auto const subgroup_props = phys_dev_ext_props.subgroup_props;
@@ -1848,7 +1848,7 @@
}
bool CoreChecks::ValidateShaderStageInputOutputLimits(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
- const PIPELINE_STATE *pipeline, spirv_inst_iter entrypoint) {
+ const PIPELINE_STATE *pipeline, spirv_inst_iter entrypoint) const {
if (pStage->stage == VK_SHADER_STAGE_COMPUTE_BIT || pStage->stage == VK_SHADER_STAGE_ALL_GRAPHICS ||
pStage->stage == VK_SHADER_STAGE_ALL) {
return false;
@@ -2101,7 +2101,7 @@
// Validate SPV_NV_cooperative_matrix behavior that can't be statically validated
// in SPIRV-Tools (e.g. due to specialization constant usage).
bool CoreChecks::ValidateCooperativeMatrix(SHADER_MODULE_STATE const *src, VkPipelineShaderStageCreateInfo const *pStage,
- const PIPELINE_STATE *pipeline) {
+ const PIPELINE_STATE *pipeline) const {
bool skip = false;
// Map SPIR-V result ID to specialization constant id (SpecId decoration value)
@@ -2305,7 +2305,7 @@
return skip;
}
-bool CoreChecks::ValidateExecutionModes(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint) {
+bool CoreChecks::ValidateExecutionModes(SHADER_MODULE_STATE const *src, spirv_inst_iter entrypoint) const {
auto entrypoint_id = entrypoint.word(2);
// The first denorm execution mode encountered, along with its bit width.
@@ -2606,7 +2606,7 @@
// - If shaderTessellationAndGeometryPointSize feature is disabled:
// * gl_PointSize must NOT be written and a default of 1.0 is assumed
bool CoreChecks::ValidatePointListShaderState(const PIPELINE_STATE *pipeline, SHADER_MODULE_STATE const *src,
- spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) {
+ spirv_inst_iter entrypoint, VkShaderStageFlagBits stage) const {
if (pipeline->topology_at_rasterizer != VK_PRIMITIVE_TOPOLOGY_POINT_LIST) {
return false;
}
@@ -2678,7 +2678,7 @@
bool CoreChecks::ValidatePipelineShaderStage(VkPipelineShaderStageCreateInfo const *pStage, const PIPELINE_STATE *pipeline,
const PIPELINE_STATE::StageState &stage_state, const SHADER_MODULE_STATE *module,
- const spirv_inst_iter &entrypoint, bool check_point_size) {
+ const spirv_inst_iter &entrypoint, bool check_point_size) const {
bool skip = false;
// Check the module
@@ -2898,7 +2898,7 @@
// Validate that the shaders used by the given pipeline and store the active_slots
// that are actually used by the pipeline into pPipeline->active_slots
-bool CoreChecks::ValidateGraphicsPipelineShaderState(const PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateGraphicsPipelineShaderState(const PIPELINE_STATE *pipeline) const {
auto pCreateInfo = pipeline->graphicsPipelineCI.ptr();
int vertex_stage = GetShaderStageId(VK_SHADER_STAGE_VERTEX_BIT);
int fragment_stage = GetShaderStageId(VK_SHADER_STAGE_FRAGMENT_BIT);
@@ -2963,7 +2963,7 @@
return skip;
}
-bool CoreChecks::ValidateComputePipeline(PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateComputePipeline(PIPELINE_STATE *pipeline) const {
const auto &stage = *pipeline->computePipelineCI.stage.ptr();
const SHADER_MODULE_STATE *module = GetShaderModuleState(stage.module);
@@ -2972,7 +2972,7 @@
return ValidatePipelineShaderStage(&stage, pipeline, pipeline->stage_state[0], module, entrypoint, false);
}
-bool CoreChecks::ValidateRayTracingPipelineNV(PIPELINE_STATE *pipeline) {
+bool CoreChecks::ValidateRayTracingPipelineNV(PIPELINE_STATE *pipeline) const {
const auto &stage = pipeline->raytracingPipelineCI.ptr()->pStages[0];
const SHADER_MODULE_STATE *module = GetShaderModuleState(stage.module);
@@ -3082,7 +3082,7 @@
shaderModuleMap[*pShaderModule] = std::move(new_shader_module);
}
-bool CoreChecks::ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE *shader) {
+bool CoreChecks::ValidateComputeWorkGroupSizes(const SHADER_MODULE_STATE *shader) const {
bool skip = false;
uint32_t local_size_x = 0;
uint32_t local_size_y = 0;