layers: Add getBufferNode() helper
Move the core_validation bufferMap to store unique_ptr<BUFFER_NODE> instead
of BUFFER_NODE by value, and perform all bufferMap look-ups through the new
getBufferNode() helper function.
Update the DescriptorSet class to use the getBufferNode() helper and to store
a layer_data pointer instead of a bufferMap pointer.
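
The pattern behind the change is small: the map owns its nodes through
unique_ptr (so node addresses stay stable across rehashes), and a single
helper hides the find()/end() dance and hands back a raw non-owning pointer
or nullptr. Below is a minimal, self-contained sketch of that pattern;
BufferState, DeviceData, GetBufferState, and the integer handle type are
illustrative stand-ins, not the layer's actual Vulkan types.

    // Sketch only: stand-in names, not the layer's real types.
    #include <cassert>
    #include <memory>
    #include <unordered_map>

    using BufferHandle = unsigned long long;   // stand-in for VkBuffer

    struct BufferState {                       // stand-in for BUFFER_NODE
        int usage;
    };

    struct DeviceData {                        // stand-in for layer_data
        // Owning nodes through unique_ptr keeps each node's address stable
        // even when the map rehashes, so raw pointers handed out by the
        // helper stay valid until the entry is erased.
        std::unordered_map<BufferHandle, std::unique_ptr<BufferState>> bufferMap;
    };

    // Return the node for 'buffer', or nullptr if it is unknown. Callers
    // test the pointer instead of repeating find()/end() at every site.
    BufferState *GetBufferState(const DeviceData *dev_data, BufferHandle buffer) {
        auto it = dev_data->bufferMap.find(buffer);
        if (it == dev_data->bufferMap.end())
            return nullptr;
        return it->second.get();
    }

    int main() {
        DeviceData dev_data;
        dev_data.bufferMap[42] = std::unique_ptr<BufferState>(new BufferState{7});

        if (BufferState *node = GetBufferState(&dev_data, 42))
            assert(node->usage == 7);                     // found: use directly
        assert(GetBufferState(&dev_data, 99) == nullptr); // never created
        return 0;
    }

The helper deliberately returns a non-owning pointer: ownership stays with
the map, and teardown still goes through bufferMap.erase().
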
diff --git a/layers/core_validation.cpp b/layers/core_validation.cpp
index 66d3abd..5f60da4 100644
--- a/layers/core_validation.cpp
+++ b/layers/core_validation.cpp
@@ -127,7 +127,7 @@
unordered_map<VkImageView, VkImageViewCreateInfo> imageViewMap;
unordered_map<VkImage, IMAGE_NODE> imageMap;
unordered_map<VkBufferView, VkBufferViewCreateInfo> bufferViewMap;
- unordered_map<VkBuffer, BUFFER_NODE> bufferMap;
+ unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
unordered_map<VkCommandPool, CMD_POOL_INFO> commandPoolMap;
unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
@@ -261,6 +261,15 @@
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
+// Return buffer node ptr for specified buffer or else nullptr
+BUFFER_NODE *getBufferNode(const layer_data *my_data, const VkBuffer buffer) {
+ auto buff_it = my_data->bufferMap.find(buffer);
+ if (buff_it == my_data->bufferMap.end()) {
+ return nullptr;
+ }
+ return buff_it->second.get();
+}
+
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
@@ -270,9 +279,9 @@
break;
}
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
- auto it = my_data->bufferMap.find(VkBuffer(handle));
- if (it != my_data->bufferMap.end())
- return &(*it).second.mem;
+ auto buff_node = getBufferNode(my_data, VkBuffer(handle));
+ if (buff_node)
+ return &buff_node->mem;
break;
}
default:
@@ -326,9 +335,9 @@
static bool validate_buffer_usage_flags(layer_data *dev_data, VkBuffer buffer, VkFlags desired, VkBool32 strict,
char const *func_name, char const *usage_string) {
bool skipCall = false;
- auto const buffer_node = dev_data->bufferMap.find(buffer);
- if (buffer_node != dev_data->bufferMap.end()) {
- skipCall = validate_usage_flags(dev_data, buffer_node->second.createInfo.usage, desired, strict, (uint64_t)buffer,
+ auto buffer_node = getBufferNode(dev_data, buffer);
+ if (buffer_node) {
+ skipCall = validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict, (uint64_t)buffer,
VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "buffer", func_name, usage_string);
}
return skipCall;
@@ -2623,7 +2632,6 @@
return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
&module, &entrypoint, enabledFeatures, shaderModuleMap);
}
-
// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, const VkDescriptorSet set) {
auto set_it = my_data->setMap.find(set);
@@ -4047,13 +4055,13 @@
bool skip_call = false;
for (auto drawDataElement : pCB->drawData) {
for (auto buffer : drawDataElement.buffers) {
- auto buffer_data = my_data->bufferMap.find(buffer);
- if (buffer_data == my_data->bufferMap.end()) {
+ auto buffer_node = getBufferNode(my_data, buffer);
+ if (!buffer_node) {
skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
(uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
"Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
} else {
- buffer_data->second.in_use.fetch_add(1);
+ buffer_node->in_use.fetch_add(1);
}
}
}
@@ -4132,9 +4140,9 @@
GLOBAL_CB_NODE *pCB = getCBNode(my_data, cmdBuffer);
for (auto drawDataElement : pCB->drawData) {
for (auto buffer : drawDataElement.buffers) {
- auto buffer_data = my_data->bufferMap.find(buffer);
- if (buffer_data != my_data->bufferMap.end()) {
- buffer_data->second.in_use.fetch_sub(1);
+ auto buffer_node = getBufferNode(my_data, buffer);
+ if (buffer_node) {
+ buffer_node->in_use.fetch_sub(1);
}
}
}
@@ -4940,13 +4948,13 @@
static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
bool skip_call = false;
- auto buffer_data = my_data->bufferMap.find(buffer);
- if (buffer_data == my_data->bufferMap.end()) {
+ auto buffer_node = getBufferNode(my_data, buffer);
+ if (!buffer_node) {
skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
(uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
"Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
} else {
- if (buffer_data->second.in_use.load()) {
+ if (buffer_node->in_use.load()) {
skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
(uint64_t)(buffer), __LINE__, DRAWSTATE_OBJECT_INUSE, "DS",
"Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer.", (uint64_t)(buffer));
@@ -5015,14 +5023,14 @@
lock.lock();
}
// Clean up memory binding and range information for buffer
- const auto &bufferEntry = dev_data->bufferMap.find(buffer);
- if (bufferEntry != dev_data->bufferMap.end()) {
- const auto &memEntry = dev_data->memObjMap.find(bufferEntry->second.mem);
+ auto buff_it = dev_data->bufferMap.find(buffer);
+ if (buff_it != dev_data->bufferMap.end()) {
+ const auto &memEntry = dev_data->memObjMap.find(buff_it->second.get()->mem);
if (memEntry != dev_data->memObjMap.end()) {
- remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), bufferEntry->second.mem, memEntry->second.bufferRanges);
+ remove_memory_ranges(reinterpret_cast<uint64_t &>(buffer), buff_it->second.get()->mem, memEntry->second.bufferRanges);
}
clear_object_binding(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
- dev_data->bufferMap.erase(bufferEntry);
+ dev_data->bufferMap.erase(buff_it);
}
}
@@ -5075,9 +5083,9 @@
uint64_t buffer_handle = (uint64_t)(buffer);
bool skipCall =
set_mem_binding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
- auto buffer_node = dev_data->bufferMap.find(buffer);
- if (buffer_node != dev_data->bufferMap.end()) {
- buffer_node->second.mem = mem;
+ auto buffer_node = getBufferNode(dev_data, buffer);
+ if (buffer_node) {
+ buffer_node->mem = mem;
VkMemoryRequirements memRequirements;
dev_data->device_dispatch_table->GetBufferMemoryRequirements(device, buffer, &memRequirements);
@@ -5101,7 +5109,7 @@
memoryOffset, memRequirements.alignment);
}
// Validate device limits alignments
- VkBufferUsageFlags usage = dev_data->bufferMap[buffer].createInfo.usage;
+ VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;
if (usage & (VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) {
if (vk_safe_modulo(memoryOffset, dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment) != 0) {
skipCall |=
@@ -5414,7 +5422,7 @@
if (VK_SUCCESS == result) {
std::lock_guard<std::mutex> lock(global_lock);
// TODO : This doesn't create deep copy of pQueueFamilyIndices so need to fix that if/when we want that data to be valid
- dev_data->bufferMap.insert(std::make_pair(*pBuffer, BUFFER_NODE(pCreateInfo)));
+ dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_NODE>(new BUFFER_NODE(pCreateInfo))));
}
return result;
}
@@ -5882,7 +5890,7 @@
// All the updates are contained in a single cvdescriptorset function
cvdescriptorset::PerformAllocateDescriptorSets(
pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap, &dev_data->setMap, dev_data,
- dev_data->descriptorSetLayoutMap, dev_data->bufferMap, dev_data->memObjMap, dev_data->bufferViewMap, dev_data->samplerMap,
+ dev_data->descriptorSetLayoutMap, dev_data->memObjMap, dev_data->bufferViewMap, dev_data->samplerMap,
dev_data->imageViewMap, dev_data->imageMap, dev_data->device_extensions.imageToSwapchainMap,
dev_data->device_extensions.swapchainMap);
}
@@ -7697,11 +7705,10 @@
dev_data->phys_dev_properties.queue_family_properties.size());
}
- auto buffer_data = dev_data->bufferMap.find(mem_barrier->buffer);
- if (buffer_data != dev_data->bufferMap.end()) {
- VkDeviceSize buffer_size = (buffer_data->second.createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
- ? buffer_data->second.createInfo.size
- : 0;
+ auto buffer_node = getBufferNode(dev_data, mem_barrier->buffer);
+ if (buffer_node) {
+ VkDeviceSize buffer_size =
+ (buffer_node->createInfo.sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO) ? buffer_node->createInfo.size : 0;
if (mem_barrier->offset >= buffer_size) {
skip_call |= log_msg(
dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
diff --git a/layers/core_validation_types.h b/layers/core_validation_types.h
index 901c187..a7b8d92 100644
--- a/layers/core_validation_types.h
+++ b/layers/core_validation_types.h
@@ -62,11 +62,6 @@
std::atomic_int in_use;
};
-namespace core_validation {
-struct layer_data;
-cvdescriptorset::DescriptorSet *getSetNode(const layer_data *, const VkDescriptorSet);
-}
-
struct DESCRIPTOR_POOL_NODE {
VkDescriptorPool pool;
uint32_t maxSets; // Max descriptor sets allowed in this pool
@@ -484,5 +479,11 @@
~GLOBAL_CB_NODE();
};
+// Fwd declarations of layer_data and helpers to look-up state from layer_data maps
+namespace core_validation {
+struct layer_data;
+cvdescriptorset::DescriptorSet *getSetNode(const layer_data *, const VkDescriptorSet);
+BUFFER_NODE *getBufferNode(const layer_data *, const VkBuffer);
+}
#endif // CORE_VALIDATION_TYPES_H_
diff --git a/layers/descriptor_sets.cpp b/layers/descriptor_sets.cpp
index 97e21b5..060506d 100644
--- a/layers/descriptor_sets.cpp
+++ b/layers/descriptor_sets.cpp
@@ -264,7 +264,7 @@
: required_descriptors_by_type{}, layout_nodes(count, nullptr) {}
cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const DescriptorSetLayout *layout,
- const std::unordered_map<VkBuffer, BUFFER_NODE> *buffer_map,
+ const core_validation::layer_data *dev_data,
const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> *memory_map,
const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> *buffer_view_map,
const std::unordered_map<VkSampler, std::unique_ptr<SAMPLER_NODE>> *sampler_map,
@@ -272,7 +272,7 @@
const std::unordered_map<VkImage, IMAGE_NODE> *image_map,
const std::unordered_map<VkImage, VkSwapchainKHR> *image_to_swapchain_map,
const std::unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> *swapchain_map)
- : some_update_(false), set_(set), p_layout_(layout), buffer_map_(buffer_map), memory_map_(memory_map),
+ : some_update_(false), set_(set), p_layout_(layout), device_data_(dev_data), memory_map_(memory_map),
buffer_view_map_(buffer_view_map), sampler_map_(sampler_map), image_view_map_(image_view_map), image_map_(image_map),
image_to_swapchain_map_(image_to_swapchain_map), swapchain_map_(swapchain_map) {
// Foreach binding, create default descriptors of given type
@@ -362,27 +362,27 @@
if (GeneralBuffer == descriptors_[i]->GetClass()) {
// Verify that buffers are valid
auto buffer = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetBuffer();
- auto buffer_node = buffer_map_->find(buffer);
- if (buffer_node == buffer_map_->end()) {
+ auto buffer_node = getBufferNode(device_data_, buffer);
+ if (!buffer_node) {
std::stringstream error_str;
error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
<< " references invalid buffer " << buffer << ".";
*error = error_str.str();
return false;
} else {
- auto mem_entry = memory_map_->find(buffer_node->second.mem);
+ auto mem_entry = memory_map_->find(buffer_node->mem);
if (mem_entry == memory_map_->end()) {
std::stringstream error_str;
error_str << "Descriptor in binding #" << binding << " at global descriptor index " << i
- << " uses buffer " << buffer << " that references invalid memory "
- << buffer_node->second.mem << ".";
+ << " uses buffer " << buffer << " that references invalid memory " << buffer_node->mem
+ << ".";
*error = error_str.str();
return false;
}
}
if (descriptors_[i]->IsDynamic()) {
// Validate that dynamic offsets are within the buffer
- auto buffer_size = buffer_node->second.createInfo.size;
+ auto buffer_size = buffer_node->createInfo.size;
auto range = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetRange();
auto desc_offset = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetOffset();
auto dyn_offset = dynamic_offsets[dyn_offset_index++];
@@ -986,15 +986,15 @@
// If there's an error, update the error string with details and return false, else return true
bool cvdescriptorset::DescriptorSet::ValidateBufferUpdate(VkBuffer buffer, VkDescriptorType type, std::string *error) const {
// First make sure that buffer is valid
- auto buff_it = buffer_map_->find(buffer);
- if (buff_it == buffer_map_->end()) {
+ auto buffer_node = getBufferNode(device_data_, buffer);
+ if (!buffer_node) {
std::stringstream error_str;
error_str << "Invalid VkBuffer: " << buffer;
*error = error_str.str();
return false;
}
// Verify that usage bits set correctly for given type
- auto usage = buff_it->second.createInfo.usage;
+ auto usage = buffer_node->createInfo.usage;
std::string error_usage_bit;
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
@@ -1289,7 +1289,6 @@
const AllocateDescriptorSetsData *ds_data, std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> *pool_map,
std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *set_map, const core_validation::layer_data *dev_data,
const std::unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> &layout_map,
- const std::unordered_map<VkBuffer, BUFFER_NODE> &buffer_map,
const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> &mem_obj_map,
const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> &buffer_view_map,
const std::unordered_map<VkSampler, std::unique_ptr<SAMPLER_NODE>> &sampler_map,
@@ -1307,7 +1306,7 @@
* global map and the pool's set.
*/
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) {
- auto new_ds = new cvdescriptorset::DescriptorSet(descriptor_sets[i], ds_data->layout_nodes[i], &buffer_map, &mem_obj_map,
+ auto new_ds = new cvdescriptorset::DescriptorSet(descriptor_sets[i], ds_data->layout_nodes[i], dev_data, &mem_obj_map,
&buffer_view_map, &sampler_map, &image_view_map, &image_map,
&image_to_swapchain_map, &swapchain_map);
diff --git a/layers/descriptor_sets.h b/layers/descriptor_sets.h
index b11b5f8..479e347 100644
--- a/layers/descriptor_sets.h
+++ b/layers/descriptor_sets.h
@@ -269,8 +269,7 @@
std::unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> *,
std::unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> *, const core_validation::layer_data *,
const std::unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> &,
- const std::unordered_map<VkBuffer, BUFFER_NODE> &, const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> &,
- const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> &,
+ const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> &, const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> &,
const std::unordered_map<VkSampler, std::unique_ptr<SAMPLER_NODE>> &,
const std::unordered_map<VkImageView, VkImageViewCreateInfo> &, const std::unordered_map<VkImage, IMAGE_NODE> &,
const std::unordered_map<VkImage, VkSwapchainKHR> &, const std::unordered_map<VkSwapchainKHR, SWAPCHAIN_NODE *> &);
@@ -296,7 +295,7 @@
class DescriptorSet : public BASE_NODE {
public:
using BASE_NODE::in_use;
- DescriptorSet(const VkDescriptorSet, const DescriptorSetLayout *, const std::unordered_map<VkBuffer, BUFFER_NODE> *,
+ DescriptorSet(const VkDescriptorSet, const DescriptorSetLayout *, const core_validation::layer_data *,
const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> *,
const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> *,
const std::unordered_map<VkSampler, std::unique_ptr<SAMPLER_NODE>> *,
@@ -378,7 +377,7 @@
std::unordered_set<GLOBAL_CB_NODE *> bound_cmd_buffers_;
std::vector<std::unique_ptr<Descriptor>> descriptors_;
// Ptrs to object containers to verify bound data
- const std::unordered_map<VkBuffer, BUFFER_NODE> *buffer_map_;
+ const core_validation::layer_data *device_data_;
const std::unordered_map<VkDeviceMemory, DEVICE_MEM_INFO> *memory_map_;
const std::unordered_map<VkBufferView, VkBufferViewCreateInfo> *buffer_view_map_;
const std::unordered_map<VkSampler, std::unique_ptr<SAMPLER_NODE>> *sampler_map_;