/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Michael Lentine <mlentine@google.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Chia-I Wu <olv@google.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Ian Elliott <ianelliott@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Dustin Graves <dustin@lunarg.com>
 * Author: Jeremy Hayes <jeremy@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Karl Schultz <karl@lunarg.com>
 * Author: Mark Young <marky@lunarg.com>
 * Author: Mike Schuchardt <mikes@lunarg.com>
 * Author: Mike Weiblen <mikew@lunarg.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

// Allow use of STL min and max functions in Windows
#define NOMINMAX

#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>

#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_enum_string_helper.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "core_validation.h"
#include "buffer_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"

#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...)      \
    {                        \
        printf(__VA_ARGS__); \
        printf("\n");        \
    }
#endif
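
// Usage sketch (illustration only; the argument is hypothetical): LOGCONSOLE takes
// printf-style arguments, and the non-Android variant appends the newline itself:
//     LOGCONSOLE("Found %u physical devices.", gpu_count);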

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

using namespace std;

namespace core_validation {

using std::unordered_map;
using std::unordered_set;

// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
// 2nd special memory handle used to flag object as unbound from memory
static const VkDeviceMemory MEMORY_UNBOUND = VkDeviceMemory(~((uint64_t)(0)) - 1);

// A special value of (0xFFFFFFFF, 0xFFFFFFFF) indicates that the surface size will be determined
// by the extent of a swapchain targeting the surface.
static const uint32_t kSurfaceSizeFromSwapchain = 0xFFFFFFFFu;
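
// Sketch of how this sentinel is typically consumed (hypothetical helper, not used
// elsewhere in this file): per the Vulkan spec, VkSurfaceCapabilitiesKHR::currentExtent
// reports (0xFFFFFFFF, 0xFFFFFFFF) when the swapchain extent determines the surface size.
//     static bool SurfaceSizeIsFromSwapchain(const VkSurfaceCapabilitiesKHR &caps) {
//         return caps.currentExtent.width == kSurfaceSizeFromSwapchain &&
//                caps.currentExtent.height == kSurfaceSizeFromSwapchain;
//     }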

struct devExts {
    bool wsi_enabled;
    bool wsi_display_swapchain_enabled;
    unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
    unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};

// fwd decls
struct shader_module;

struct instance_layer_data {
    VkInstance instance = VK_NULL_HANDLE;
    debug_report_data *report_data = nullptr;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerInstanceDispatchTable dispatch_table;

    CALL_STATE vkEnumeratePhysicalDevicesState = UNCALLED;
    uint32_t physical_devices_count = 0;
    CHECK_DISABLED disabled = {};

    unordered_map<VkPhysicalDevice, PHYSICAL_DEVICE_STATE> physical_device_map;
    unordered_map<VkSurfaceKHR, SURFACE_STATE> surface_map;

    bool surfaceExtensionEnabled = false;
    bool displayExtensionEnabled = false;
    bool androidSurfaceExtensionEnabled = false;
    bool mirSurfaceExtensionEnabled = false;
    bool waylandSurfaceExtensionEnabled = false;
    bool win32SurfaceExtensionEnabled = false;
    bool xcbSurfaceExtensionEnabled = false;
    bool xlibSurfaceExtensionEnabled = false;
};

struct layer_data {
    debug_report_data *report_data = nullptr;
    VkLayerDispatchTable dispatch_table;

    devExts device_extensions = {};
    unordered_set<VkQueue> queues;  // All queues under given device
    // Global set of all cmdBuffers that are inFlight on this device
    unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
    // Layer specific data
    unordered_map<VkSampler, unique_ptr<SAMPLER_STATE>> samplerMap;
    unordered_map<VkImageView, unique_ptr<IMAGE_VIEW_STATE>> imageViewMap;
    unordered_map<VkImage, unique_ptr<IMAGE_STATE>> imageMap;
    unordered_map<VkBufferView, unique_ptr<BUFFER_VIEW_STATE>> bufferViewMap;
    unordered_map<VkBuffer, unique_ptr<BUFFER_STATE>> bufferMap;
    unordered_map<VkPipeline, PIPELINE_STATE *> pipelineMap;
    unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
    unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_STATE *> descriptorPoolMap;
    unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
    unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
    unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
    unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
    unordered_map<VkFence, FENCE_NODE> fenceMap;
    unordered_map<VkQueue, QUEUE_STATE> queueMap;
    unordered_map<VkEvent, EVENT_STATE> eventMap;
    unordered_map<QueryObject, bool> queryToStateMap;
    unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
    unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
    unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
    unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_STATE>> frameBufferMap;
    unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
    unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
    unordered_map<VkRenderPass, unique_ptr<RENDER_PASS_STATE>> renderPassMap;
    unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;

    VkDevice device = VK_NULL_HANDLE;
    VkPhysicalDevice physical_device = VK_NULL_HANDLE;

    instance_layer_data *instance_data = nullptr;  // from device to enclosing instance

    VkPhysicalDeviceFeatures enabled_features = {};
    // Device specific data
    PHYS_DEV_PROPERTIES_NODE phys_dev_properties = {};
    VkPhysicalDeviceMemoryProperties phys_dev_mem_props = {};
    VkPhysicalDeviceProperties phys_dev_props = {};
};

// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static unordered_map<void *, instance_layer_data *> instance_layer_data_map;

static const VkLayerProperties global_layer = {
    "VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};

template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
    bool foundLayer = false;
    for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
        if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
            foundLayer = true;
        }
        // This has to be logged to console as we don't have a callback at this point.
        if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
            LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.", global_layer.layerName);
        }
    }
}
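
// Call sketch (illustration only; the create info values are hypothetical): the
// warning above fires when unique_objects is listed before this layer, e.g.:
//     const char *layers[] = {"VK_LAYER_GOOGLE_unique_objects", "VK_LAYER_LUNARG_core_validation"};
//     VkInstanceCreateInfo ci = {};
//     ci.enabledLayerCount = 2;
//     ci.ppEnabledLayerNames = layers;
//     ValidateLayerOrdering(ci);  // logs the LOGCONSOLE message on the first iteration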

// Code imported from shader_checker
static void build_def_index(shader_module *);

// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
    std::vector<uint32_t>::const_iterator zero;
    std::vector<uint32_t>::const_iterator it;

    uint32_t len() {
        auto result = *it >> 16;
        assert(result > 0);
        return result;
    }

    uint32_t opcode() { return *it & 0x0ffffu; }

    uint32_t const &word(unsigned n) {
        assert(n < len());
        return it[n];
    }

    uint32_t offset() { return (uint32_t)(it - zero); }

    spirv_inst_iter() {}

    spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}

    bool operator==(spirv_inst_iter const &other) { return it == other.it; }

    bool operator!=(spirv_inst_iter const &other) { return it != other.it; }

    spirv_inst_iter operator++(int) {  // x++
        spirv_inst_iter ii = *this;
        it += len();
        return ii;
    }

    spirv_inst_iter operator++() {  // ++x
        it += len();
        return *this;
    }

    // The iterator and the value are the same thing.
    spirv_inst_iter &operator*() { return *this; }
    spirv_inst_iter const &operator*() const { return *this; }
};
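
// Iteration sketch (illustration only, assuming a populated shader_module as defined
// below): each instruction's first word packs its word count in the high 16 bits, so
// operator++ strides instruction-by-instruction and a module walks with range-based for:
//     for (auto insn : *module) {
//         if (insn.opcode() == spv::OpEntryPoint) {
//             auto name = (char const *)&insn.word(3);  // entry point's literal name operand
//         }
//     }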

struct shader_module {
    // The spirv image itself
    vector<uint32_t> words;
    // A mapping of <id> to the first word of its def. this is useful because walking type
    // trees, constant expressions, etc requires jumping all over the instruction stream.
    unordered_map<unsigned, unsigned> def_index;

    shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
        : words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
          def_index() {
        build_def_index(this);
    }

    // Expose begin() / end() to enable range-based for
    spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); }  // First insn
    spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); }          // Just past last insn
    // Given an offset into the module, produce an iterator there.
    spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }

    // Gets an iterator to the definition of an id
    spirv_inst_iter get_def(unsigned id) const {
        auto it = def_index.find(id);
        if (it == def_index.end()) {
            return end();
        }
        return at(it->second);
    }
};
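
// Note: a SPIR-V binary starts with a five-word header (magic, version, generator,
// bound, schema), which is why begin() skips to words.begin() + 5. A typical def
// lookup composes with the iterator (sketch, illustration only):
//     auto def = module->get_def(insn.word(1));
//     if (def != module->end() && def.opcode() == spv::OpTypePointer) { /* walk the pointee type */ }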

// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;

// Return IMAGE_VIEW_STATE ptr for specified imageView or else NULL
IMAGE_VIEW_STATE *getImageViewState(const layer_data *dev_data, VkImageView image_view) {
    auto iv_it = dev_data->imageViewMap.find(image_view);
    if (iv_it == dev_data->imageViewMap.end()) {
        return nullptr;
    }
    return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_STATE *getSamplerState(const layer_data *dev_data, VkSampler sampler) {
    auto sampler_it = dev_data->samplerMap.find(sampler);
    if (sampler_it == dev_data->samplerMap.end()) {
        return nullptr;
    }
    return sampler_it->second.get();
}
// Return image state ptr for specified image or else NULL
IMAGE_STATE *getImageState(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->imageMap.find(image);
    if (img_it == dev_data->imageMap.end()) {
        return nullptr;
    }
    return img_it->second.get();
}
// Return buffer state ptr for specified buffer or else NULL
BUFFER_STATE *getBufferState(const layer_data *dev_data, VkBuffer buffer) {
    auto buff_it = dev_data->bufferMap.find(buffer);
    if (buff_it == dev_data->bufferMap.end()) {
        return nullptr;
    }
    return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
    auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
    if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
        return nullptr;
    }
    return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
    auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
    if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
        return VK_NULL_HANDLE;
    }
    return img_it->second;
}
// Return buffer view state ptr for specified bufferView or else NULL
BUFFER_VIEW_STATE *getBufferViewState(const layer_data *my_data, VkBufferView buffer_view) {
    auto bv_it = my_data->bufferViewMap.find(buffer_view);
    if (bv_it == my_data->bufferViewMap.end()) {
        return nullptr;
    }
    return bv_it->second.get();
}

FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
    auto it = dev_data->fenceMap.find(fence);
    if (it == dev_data->fenceMap.end()) {
        return nullptr;
    }
    return &it->second;
}

EVENT_STATE *getEventNode(layer_data *dev_data, VkEvent event) {
    auto it = dev_data->eventMap.find(event);
    if (it == dev_data->eventMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
    auto it = dev_data->queryPoolMap.find(query_pool);
    if (it == dev_data->queryPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

QUEUE_STATE *getQueueState(layer_data *dev_data, VkQueue queue) {
    auto it = dev_data->queueMap.find(queue);
    if (it == dev_data->queueMap.end()) {
        return nullptr;
    }
    return &it->second;
}

SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
    auto it = dev_data->semaphoreMap.find(semaphore);
    if (it == dev_data->semaphoreMap.end()) {
        return nullptr;
    }
    return &it->second;
}

COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
    auto it = dev_data->commandPoolMap.find(pool);
    if (it == dev_data->commandPoolMap.end()) {
        return nullptr;
    }
    return &it->second;
}

PHYSICAL_DEVICE_STATE *getPhysicalDeviceState(instance_layer_data *instance_data, VkPhysicalDevice phys) {
    auto it = instance_data->physical_device_map.find(phys);
    if (it == instance_data->physical_device_map.end()) {
        return nullptr;
    }
    return &it->second;
}

SURFACE_STATE *getSurfaceState(instance_layer_data *instance_data, VkSurfaceKHR surface) {
    auto it = instance_data->surface_map.find(surface);
    if (it == instance_data->surface_map.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return ptr to memory binding for given handle of specified type
static BINDABLE *GetObjectMemBinding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return getImageState(my_data, VkImage(handle));
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return getBufferState(my_data, VkBuffer(handle));
    default:
        break;
    }
    return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);

// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict, uint64_t obj_handle,
                                 VkDebugReportObjectTypeEXT obj_type, int32_t const msgCode, char const *ty_str,
                                 char const *func_name, char const *usage_str) {
    bool correct_usage = false;
    bool skip_call = false;
    if (strict)
        correct_usage = ((actual & desired) == desired);
    else
        correct_usage = ((actual & desired) != 0);
    if (!correct_usage) {
        if (msgCode == -1) {
            // TODO: Fix callers with msgCode == -1 to use correct validation checks.
            skip_call =
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
                        MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
                        " used by %s. In this case, %s should have %s set during creation.",
                        ty_str, obj_handle, func_name, ty_str, usage_str);
        } else {
            const char *valid_usage = (msgCode == -1) ? "" : validation_error_map[msgCode];
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__, msgCode, "MEM",
                                "Invalid usage flag for %s 0x%" PRIxLEAST64
                                " used by %s. In this case, %s should have %s set during creation. %s",
                                ty_str, obj_handle, func_name, ty_str, usage_str, valid_usage);
        }
    }
    return skip_call;
}
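
// Call sketch (illustration only; msg_code and handle are placeholders): with strict ==
// VK_TRUE every desired bit must be present, e.g. requiring transfer-source usage:
//     skip |= validate_usage_flags(dev_data, create_info.usage, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_TRUE,
//                                  handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
//                                  msg_code /* a UNIQUE_VALIDATION_ERROR_CODE, or -1 for legacy checks */,
//                                  "buffer", "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");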

// Helper function to validate usage flags for images
// For given image_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_STATE const *image_state, VkFlags desired, VkBool32 strict,
                                    int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, image_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                msgCode, "image", func_name, usage_string);
}

// Helper function to validate usage flags for buffers
// For given buffer_state send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_STATE const *buffer_state, VkFlags desired, VkBool32 strict,
                                     int32_t const msgCode, char const *func_name, char const *usage_string) {
    return validate_usage_flags(dev_data, buffer_state->createInfo.usage, desired, strict,
                                reinterpret_cast<const uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                msgCode, "buffer", func_name, usage_string);
}

// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
    auto mem_it = dev_data->memObjMap.find(mem);
    if (mem_it == dev_data->memObjMap.end()) {
        return NULL;
    }
    return mem_it->second.get();
}

static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
                             const VkMemoryAllocateInfo *pAllocateInfo) {
    assert(object != NULL);

    my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}

// Helper function to print lowercase string of object type
// TODO: Unify string helper functions, this should really come out of a string helper if not there already
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
    switch (type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
        return "image";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
        return "buffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT:
        return "image view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
        return "buffer view";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
        return "swapchain";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
        return "descriptor set";
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
        return "framebuffer";
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
        return "event";
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
        return "query pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT:
        return "descriptor pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT:
        return "command pool";
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
        return "pipeline";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
        return "sampler";
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT:
        return "renderpass";
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT:
        return "device memory";
    case VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT:
        return "semaphore";
    default:
        return "unknown";
    }
}

// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
                                  VkDebugReportObjectTypeEXT type, const char *functionName) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        if (!mem_info->bound_ranges[bound_object_handle].valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid region of memory allocation 0x%" PRIx64 " for bound %s object 0x%" PRIx64
                           ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(mem), object_type_to_string(type), bound_object_handle);
        }
    }
    return false;
}
// For given image_state
// If mem is special swapchain key, then verify that image_state valid member is true
// Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_STATE *image_state, const char *functionName) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        if (!image_state->valid) {
            return log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                           reinterpret_cast<uint64_t &>(image_state->binding.mem), __LINE__, MEMTRACK_INVALID_MEM_REGION, "MEM",
                           "%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
                           functionName, reinterpret_cast<uint64_t &>(image_state->image));
        }
    } else {
        return ValidateMemoryIsValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image),
                                     VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, functionName);
    }
    return false;
}
// For given buffer_state, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_STATE *buffer_state, const char *functionName) {
    return ValidateMemoryIsValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer),
                                 VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->bound_ranges[handle].valid = valid;
    }
}
// For given image node
// If mem is special swapchain key, then set entire image_state to valid param value
// Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_STATE *image_state, bool valid) {
    if (image_state->binding.mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        image_state->valid = valid;
    } else {
        SetMemoryValid(dev_data, image_state->binding.mem, reinterpret_cast<uint64_t &>(image_state->image), valid);
    }
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_STATE *buffer_state, bool valid) {
    SetMemoryValid(dev_data, buffer_state->binding.mem, reinterpret_cast<uint64_t &>(buffer_state->buffer), valid);
}
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
                                              const char *apiName) {
    bool skip_call = false;

    // Skip validation if this image was created through WSI
    if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
        if (pMemInfo) {
            // Now update CBInfo's Mem reference list
            GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, cb);
            pMemInfo->cb_bindings.insert(cb_node);
            // TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
            if (cb_node) {
                cb_node->memObjs.insert(mem);
            }
        }
    }
    return skip_call;
}

// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_STATE *sampler_state) {
    sampler_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(sampler_state->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}

// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_STATE *image_state) {
    // Skip validation if this image was created through WSI
    if (image_state->binding.mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
        // First update CB binding in MemObj mini CB list
        for (auto mem_binding : image_state->GetBoundMemory()) {
            DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
            if (pMemInfo) {
                pMemInfo->cb_bindings.insert(cb_node);
                // Now update CBInfo's Mem reference list
                cb_node->memObjs.insert(mem_binding);
            }
        }
        // Now update cb binding for image
        cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(image_state->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
        image_state->cb_bindings.insert(cb_node);
    }
}

// Create binding link between given image view node and its image with command buffer node
void AddCommandBufferBindingImageView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_VIEW_STATE *view_state) {
    // First add bindings for imageView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT});
    auto image_state = getImageState(dev_data, view_state->create_info.image);
    // Add bindings for image within imageView
    if (image_state) {
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
    }
}

// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_STATE *buffer_state) {
    // First update CB binding in MemObj mini CB list
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem_binding);
        if (pMemInfo) {
            pMemInfo->cb_bindings.insert(cb_node);
            // Now update CBInfo's Mem reference list
            cb_node->memObjs.insert(mem_binding);
        }
    }
    // Now update cb binding for buffer
    cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buffer_state->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
    buffer_state->cb_bindings.insert(cb_node);
}

// Create binding link between given buffer view node and its buffer with command buffer node
void AddCommandBufferBindingBufferView(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_VIEW_STATE *view_state) {
    // First add bindings for bufferView
    view_state->cb_bindings.insert(cb_node);
    cb_node->object_bindings.insert(
        {reinterpret_cast<uint64_t &>(view_state->buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT});
    auto buffer_state = getBufferState(dev_data, view_state->create_info.buffer);
    // Add bindings for buffer within bufferView
    if (buffer_state) {
        AddCommandBufferBindingBuffer(dev_data, cb_node, buffer_state);
    }
}

// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    if (cb_node) {
        if (cb_node->memObjs.size() > 0) {
            for (auto mem : cb_node->memObjs) {
                DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
                if (pInfo) {
                    pInfo->cb_bindings.erase(cb_node);
                }
            }
            cb_node->memObjs.clear();
        }
        cb_node->validate_functions.clear();
    }
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
    clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}

// Clear a single object binding from given memory object, or report error if binding is missing
static bool ClearMemoryObjectBinding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory mem) {
    DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
    // This obj is bound to a memory object. Remove the reference to this object in that memory object's list
    if (mem_info) {
        mem_info->obj_bindings.erase({handle, type});
    }
    return false;
}

// ClearMemoryObjectBindings clears the binding of objects to memory
// For the given object it pulls the memory bindings and makes sure that the bindings
// no longer refer to the object being cleared. This occurs when objects are destroyed.
static bool ClearMemoryObjectBindings(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
    bool skip = false;
    BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
    if (mem_binding) {
        if (!mem_binding->sparse) {
            skip = ClearMemoryObjectBinding(dev_data, handle, type, mem_binding->binding.mem);
        } else {  // Sparse, clear all bindings
            for (auto &sparse_mem_binding : mem_binding->sparse_bindings) {
                skip |= ClearMemoryObjectBinding(dev_data, handle, type, sparse_mem_binding.mem);
            }
        }
    }
    return skip;
}

// For given mem object, verify that it is not null or UNBOUND, if it is, report error. Return skip value.
bool VerifyBoundMemoryIsValid(const layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, const char *api_name,
                              const char *type_name, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (VK_NULL_HANDLE == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound. Memory should be bound by calling "
                         "vkBind%sMemory(). %s",
                         api_name, type_name, handle, type_name, validation_error_map[error_code]);
    } else if (MEMORY_UNBOUND == mem) {
        result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, handle,
                         __LINE__, error_code, "MEM",
                         "%s: Vk%s object 0x%" PRIxLEAST64 " used with no memory bound and previously bound memory was freed. "
                         "Memory must not be freed prior to this operation. %s",
                         api_name, type_name, handle, validation_error_map[error_code]);
    }
    return result;
}

// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_STATE *image_state, const char *api_name,
                                  UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(image_state->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, image_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(image_state->image), api_name, "Image", error_code);
    }
    return result;
}

// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_STATE *buffer_state, const char *api_name,
                                   UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool result = false;
    if (0 == (static_cast<uint32_t>(buffer_state->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
        result = VerifyBoundMemoryIsValid(dev_data, buffer_state->binding.mem,
                                          reinterpret_cast<const uint64_t &>(buffer_state->buffer), api_name, "Buffer", error_code);
    }
    return result;
}

// SetMemBinding is used to establish immutable, non-sparse binding between a single image/buffer object and memory object
// For NULL mem case, output warning
// Make sure given object is in global object map
// IF a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
// TODO: We may need to refactor or pass in multiple valid usage statements to handle multiple valid usage conditions.
static bool SetMemBinding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, VkDebugReportObjectTypeEXT type,
                          const char *apiName) {
    bool skip_call = false;
    // It's an error to bind an object to NULL memory
    if (mem != VK_NULL_HANDLE) {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        // TODO : Add check here to make sure object isn't sparse
        //  VALIDATION_ERROR_00792 for buffers
        //  VALIDATION_ERROR_00804 for images
        assert(!mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            DEVICE_MEM_INFO *prev_binding = getMemObjInfo(dev_data, mem_binding->binding.mem);
            if (prev_binding) {
                // TODO: VALIDATION_ERROR_00791 and VALIDATION_ERROR_00803
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which has already been bound to mem object 0x%" PRIxLEAST64,
                            apiName, reinterpret_cast<uint64_t &>(mem), handle, reinterpret_cast<uint64_t &>(prev_binding->mem));
            } else if (mem_binding->binding.mem == MEMORY_UNBOUND) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_REBIND_OBJECT, "MEM",
                            "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
                            ") which was previously bound to memory that has since been freed. Memory bindings are immutable in "
                            "Vulkan so this attempt to bind to new memory is not allowed.",
                            apiName, reinterpret_cast<uint64_t &>(mem), handle);
            } else {
                mem_info->obj_bindings.insert({handle, type});
                // For image objects, make sure default memory state is correctly set
                // TODO : What's the best/correct way to handle this?
                if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
                    auto const image_state = getImageState(dev_data, VkImage(handle));
                    if (image_state) {
                        VkImageCreateInfo ici = image_state->createInfo;
                        if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
                            // TODO:: More memory state transition stuff.
                        }
                    }
                }
                mem_binding->binding.mem = mem;
            }
        }
    }
    return skip_call;
}

// For NULL mem case, clear any previous binding Else...
// Make sure given object is in its object map
// IF a previous binding existed, update binding
// Add reference from objectInfo to memoryInfo
// Add reference off of object's binding info
// Return true if an error was reported, false otherwise
static bool SetSparseMemBinding(layer_data *dev_data, MEM_BINDING binding, uint64_t handle, VkDebugReportObjectTypeEXT type,
                                const char *apiName) {
    bool skip_call = VK_FALSE;
    // Handle NULL case separately, just clear previous binding & decrement reference
    if (binding.mem == VK_NULL_HANDLE) {
        // TODO : This should cause the range of the resource to be unbound according to spec
    } else {
        BINDABLE *mem_binding = GetObjectMemBinding(dev_data, handle, type);
        assert(mem_binding);
        assert(mem_binding->sparse);
        DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, binding.mem);
        if (mem_info) {
            mem_info->obj_bindings.insert({handle, type});
            // Need to set mem binding for this object
            mem_binding->sparse_bindings.insert(binding);
        }
    }
    return skip_call;
}
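
// Binding-flow sketch (illustration only, assuming MEM_BINDING carries {mem, offset,
// size}): a sparse bind from vkQueueBindSparse would record one MEM_BINDING per bound
// range, while non-sparse binds route through SetMemBinding above:
//     MEM_BINDING binding = {bind.memory, bind.memoryOffset, bind.size};
//     skip_call |= SetSparseMemBinding(dev_data, binding, reinterpret_cast<uint64_t &>(buffer),
//                                      VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkQueueBindSparse()");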

// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
    switch (cmd) {
    case CMD_BINDPIPELINE:
        return "CMD_BINDPIPELINE";
    case CMD_BINDPIPELINEDELTA:
        return "CMD_BINDPIPELINEDELTA";
    case CMD_SETVIEWPORTSTATE:
        return "CMD_SETVIEWPORTSTATE";
    case CMD_SETLINEWIDTHSTATE:
        return "CMD_SETLINEWIDTHSTATE";
    case CMD_SETDEPTHBIASSTATE:
        return "CMD_SETDEPTHBIASSTATE";
    case CMD_SETBLENDSTATE:
        return "CMD_SETBLENDSTATE";
    case CMD_SETDEPTHBOUNDSSTATE:
        return "CMD_SETDEPTHBOUNDSSTATE";
    case CMD_SETSTENCILREADMASKSTATE:
        return "CMD_SETSTENCILREADMASKSTATE";
    case CMD_SETSTENCILWRITEMASKSTATE:
        return "CMD_SETSTENCILWRITEMASKSTATE";
    case CMD_SETSTENCILREFERENCESTATE:
        return "CMD_SETSTENCILREFERENCESTATE";
    case CMD_BINDDESCRIPTORSETS:
        return "CMD_BINDDESCRIPTORSETS";
    case CMD_BINDINDEXBUFFER:
        return "CMD_BINDINDEXBUFFER";
    case CMD_BINDVERTEXBUFFER:
        return "CMD_BINDVERTEXBUFFER";
    case CMD_DRAW:
        return "CMD_DRAW";
    case CMD_DRAWINDEXED:
        return "CMD_DRAWINDEXED";
    case CMD_DRAWINDIRECT:
        return "CMD_DRAWINDIRECT";
    case CMD_DRAWINDEXEDINDIRECT:
        return "CMD_DRAWINDEXEDINDIRECT";
    case CMD_DISPATCH:
        return "CMD_DISPATCH";
    case CMD_DISPATCHINDIRECT:
        return "CMD_DISPATCHINDIRECT";
    case CMD_COPYBUFFER:
        return "CMD_COPYBUFFER";
    case CMD_COPYIMAGE:
        return "CMD_COPYIMAGE";
    case CMD_BLITIMAGE:
        return "CMD_BLITIMAGE";
    case CMD_COPYBUFFERTOIMAGE:
        return "CMD_COPYBUFFERTOIMAGE";
    case CMD_COPYIMAGETOBUFFER:
        return "CMD_COPYIMAGETOBUFFER";
    case CMD_CLONEIMAGEDATA:
        return "CMD_CLONEIMAGEDATA";
    case CMD_UPDATEBUFFER:
        return "CMD_UPDATEBUFFER";
    case CMD_FILLBUFFER:
        return "CMD_FILLBUFFER";
    case CMD_CLEARCOLORIMAGE:
        return "CMD_CLEARCOLORIMAGE";
    case CMD_CLEARATTACHMENTS:
913 return "CMD_CLEARCOLORATTACHMENT";
    case CMD_CLEARDEPTHSTENCILIMAGE:
        return "CMD_CLEARDEPTHSTENCILIMAGE";
    case CMD_RESOLVEIMAGE:
        return "CMD_RESOLVEIMAGE";
    case CMD_SETEVENT:
        return "CMD_SETEVENT";
    case CMD_RESETEVENT:
        return "CMD_RESETEVENT";
    case CMD_WAITEVENTS:
        return "CMD_WAITEVENTS";
    case CMD_PIPELINEBARRIER:
        return "CMD_PIPELINEBARRIER";
    case CMD_BEGINQUERY:
        return "CMD_BEGINQUERY";
    case CMD_ENDQUERY:
        return "CMD_ENDQUERY";
    case CMD_RESETQUERYPOOL:
        return "CMD_RESETQUERYPOOL";
    case CMD_COPYQUERYPOOLRESULTS:
        return "CMD_COPYQUERYPOOLRESULTS";
    case CMD_WRITETIMESTAMP:
        return "CMD_WRITETIMESTAMP";
    case CMD_INITATOMICCOUNTERS:
        return "CMD_INITATOMICCOUNTERS";
    case CMD_LOADATOMICCOUNTERS:
        return "CMD_LOADATOMICCOUNTERS";
    case CMD_SAVEATOMICCOUNTERS:
        return "CMD_SAVEATOMICCOUNTERS";
    case CMD_BEGINRENDERPASS:
        return "CMD_BEGINRENDERPASS";
    case CMD_ENDRENDERPASS:
        return "CMD_ENDRENDERPASS";
    default:
        return "UNKNOWN";
    }
}

// SPIRV utility functions
static void build_def_index(shader_module *module) {
    for (auto insn : *module) {
        switch (insn.opcode()) {
        // Types
        case spv::OpTypeVoid:
        case spv::OpTypeBool:
        case spv::OpTypeInt:
        case spv::OpTypeFloat:
        case spv::OpTypeVector:
        case spv::OpTypeMatrix:
        case spv::OpTypeImage:
        case spv::OpTypeSampler:
        case spv::OpTypeSampledImage:
        case spv::OpTypeArray:
        case spv::OpTypeRuntimeArray:
        case spv::OpTypeStruct:
        case spv::OpTypeOpaque:
        case spv::OpTypePointer:
        case spv::OpTypeFunction:
        case spv::OpTypeEvent:
        case spv::OpTypeDeviceEvent:
        case spv::OpTypeReserveId:
        case spv::OpTypeQueue:
        case spv::OpTypePipe:
            module->def_index[insn.word(1)] = insn.offset();
            break;

        // Fixed constants
        case spv::OpConstantTrue:
        case spv::OpConstantFalse:
        case spv::OpConstant:
        case spv::OpConstantComposite:
        case spv::OpConstantSampler:
        case spv::OpConstantNull:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Specialization constants
        case spv::OpSpecConstantTrue:
        case spv::OpSpecConstantFalse:
        case spv::OpSpecConstant:
        case spv::OpSpecConstantComposite:
        case spv::OpSpecConstantOp:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Variables
        case spv::OpVariable:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        // Functions
        case spv::OpFunction:
            module->def_index[insn.word(2)] = insn.offset();
            break;

        default:
            // We don't care about any other defs for now.
            break;
        }
    }
}
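
// Illustrative sketch (hypothetical ids, not from any real module): once build_def_index has run, a result id
// resolves to its defining instruction in O(1) via shader_module::get_def, which the helpers below all rely on --
//     auto insn = module->get_def(type_id);   // iterator at the instruction that defined type_id
//     if (insn != module->end()) { /* inspect insn.opcode(), insn.word(n), ... */ }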

static spirv_inst_iter find_entrypoint(shader_module *src, char const *name, VkShaderStageFlagBits stageBits) {
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpEntryPoint) {
            auto entrypointName = (char const *)&insn.word(3);
            auto entrypointStageBits = 1u << insn.word(1);

            if (!strcmp(entrypointName, name) && (entrypointStageBits & stageBits)) {
                return insn;
            }
        }
    }

    return src->end();
}
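
// Usage sketch (hypothetical values): find the OpEntryPoint matching a pipeline stage's pName and stage --
//     auto entrypoint = find_entrypoint(module, "main", VK_SHADER_STAGE_VERTEX_BIT);
//     if (entrypoint == module->end()) { /* no such entrypoint: bad pName or wrong stage */ }
// The stage comparison is a mask test, so a caller could accept any of several stages at once.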

static char const *storage_class_name(unsigned sc) {
    switch (sc) {
    case spv::StorageClassInput:
        return "input";
    case spv::StorageClassOutput:
        return "output";
    case spv::StorageClassUniformConstant:
        return "const uniform";
    case spv::StorageClassUniform:
        return "uniform";
    case spv::StorageClassWorkgroup:
        return "workgroup local";
    case spv::StorageClassCrossWorkgroup:
        return "workgroup global";
    case spv::StorageClassPrivate:
        return "private global";
    case spv::StorageClassFunction:
        return "function";
    case spv::StorageClassGeneric:
        return "generic";
    case spv::StorageClassAtomicCounter:
        return "atomic counter";
    case spv::StorageClassImage:
        return "image";
    case spv::StorageClassPushConstant:
        return "push constant";
    default:
        return "unknown";
    }
}

// Get the value of an integral constant
unsigned get_constant_value(shader_module const *src, unsigned id) {
    auto value = src->get_def(id);
    assert(value != src->end());

    if (value.opcode() != spv::OpConstant) {
        // TODO: Either ensure that the specialization transform is already performed on a module we're
        // considering here, OR -- specialize on the fly now.
        return 1;
    }

    return value.word(3);
}


static void describe_type_inner(std::ostringstream &ss, shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeBool:
        ss << "bool";
        break;
    case spv::OpTypeInt:
        ss << (insn.word(3) ? 's' : 'u') << "int" << insn.word(2);
        break;
    case spv::OpTypeFloat:
        ss << "float" << insn.word(2);
        break;
    case spv::OpTypeVector:
        ss << "vec" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeMatrix:
        ss << "mat" << insn.word(3) << " of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeArray:
        ss << "arr[" << get_constant_value(src, insn.word(3)) << "] of ";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypePointer:
        ss << "ptr to " << storage_class_name(insn.word(2)) << " ";
        describe_type_inner(ss, src, insn.word(3));
        break;
    case spv::OpTypeStruct: {
        ss << "struct of (";
        for (unsigned i = 2; i < insn.len(); i++) {
            describe_type_inner(ss, src, insn.word(i));
            if (i == insn.len() - 1) {
                ss << ")";
            } else {
                ss << ", ";
            }
        }
        break;
    }
    case spv::OpTypeSampler:
        ss << "sampler";
        break;
    case spv::OpTypeSampledImage:
        ss << "sampler+";
        describe_type_inner(ss, src, insn.word(2));
        break;
    case spv::OpTypeImage:
        ss << "image(dim=" << insn.word(3) << ", sampled=" << insn.word(7) << ")";
        break;
    default:
        ss << "oddtype";
        break;
    }
}


static std::string describe_type(shader_module const *src, unsigned type) {
    std::ostringstream ss;
    describe_type_inner(ss, src, type);
    return ss.str();
}
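
// Examples of the strings produced above (derived from the cases): a GLSL `vec4` output variable, reached through
// its pointer type, is described as "ptr to output vec4 of float32"; a `mat3` member comes out as
// "mat3 of vec3 of float32". These strings feed the interface-mismatch messages later in this file.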


static bool is_narrow_numeric_type(spirv_inst_iter type) {
    if (type.opcode() != spv::OpTypeInt && type.opcode() != spv::OpTypeFloat)
        return false;
    return type.word(2) < 64;
}


static bool types_match(shader_module const *a, shader_module const *b, unsigned a_type, unsigned b_type, bool a_arrayed, bool b_arrayed, bool relaxed) {
    // Walk two type trees together, and complain about differences
    auto a_insn = a->get_def(a_type);
    auto b_insn = b->get_def(b_type);
    assert(a_insn != a->end());
    assert(b_insn != b->end());

    if (a_arrayed && a_insn.opcode() == spv::OpTypeArray) {
        return types_match(a, b, a_insn.word(2), b_type, false, b_arrayed, relaxed);
    }

    if (b_arrayed && b_insn.opcode() == spv::OpTypeArray) {
        // We probably just found the extra level of arrayness in b_type: compare the type inside it to a_type
        return types_match(a, b, a_type, b_insn.word(2), a_arrayed, false, relaxed);
    }

    if (a_insn.opcode() == spv::OpTypeVector && relaxed && is_narrow_numeric_type(b_insn)) {
        return types_match(a, b, a_insn.word(2), b_type, a_arrayed, b_arrayed, false);
    }

    if (a_insn.opcode() != b_insn.opcode()) {
        return false;
    }

    if (a_insn.opcode() == spv::OpTypePointer) {
        // Match on pointee type. The storage class is expected to differ.
        return types_match(a, b, a_insn.word(3), b_insn.word(3), a_arrayed, b_arrayed, relaxed);
    }

    if (a_arrayed || b_arrayed) {
        // If we haven't resolved array-of-verts by here, we're not going to.
        return false;
    }

    switch (a_insn.opcode()) {
    case spv::OpTypeBool:
        return true;
    case spv::OpTypeInt:
        // Match on width, signedness
        return a_insn.word(2) == b_insn.word(2) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeFloat:
        // Match on width
        return a_insn.word(2) == b_insn.word(2);
    case spv::OpTypeVector:
        // Match on element type, count.
        if (!types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false))
            return false;
        if (relaxed && is_narrow_numeric_type(a->get_def(a_insn.word(2)))) {
            return a_insn.word(3) >= b_insn.word(3);
        } else {
            return a_insn.word(3) == b_insn.word(3);
        }
    case spv::OpTypeMatrix:
        // Match on element type, count.
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) && a_insn.word(3) == b_insn.word(3);
    case spv::OpTypeArray:
        // Match on element type, count. These all have the same layout. We don't get here if b_arrayed. This differs from
        // vector & matrix types in that the array size is the id of a constant instruction, not a literal within OpTypeArray.
        return types_match(a, b, a_insn.word(2), b_insn.word(2), a_arrayed, b_arrayed, false) &&
               get_constant_value(a, a_insn.word(3)) == get_constant_value(b, b_insn.word(3));
    case spv::OpTypeStruct:
        // Match on all element types
        {
            if (a_insn.len() != b_insn.len()) {
                return false; // Structs cannot match if member counts differ
            }

            for (unsigned i = 2; i < a_insn.len(); i++) {
                if (!types_match(a, b, a_insn.word(i), b_insn.word(i), a_arrayed, b_arrayed, false)) {
                    return false;
                }
            }

            return true;
        }
    default:
        // Remaining types are CLisms, or may not appear in the interfaces we are interested in. Just claim no match.
        return false;
    }
}
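
// Sketch of the relaxed-matching rule (hypothetical ids): with relaxed=true, a producer vec4-of-float32 output can
// feed a consumer input of plain float32 or a narrower vector, since the check decomposes the vector and lets the
// consumer read a subset of components --
//     types_match(vs, fs, vec4_id, float32_id, false, false, true);   // true under relaxed rules
//     types_match(vs, fs, vec4_id, float32_id, false, false, false);  // false: opcodes must match exactly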

static int value_or_default(std::unordered_map<unsigned, unsigned> const &map, unsigned id, int def) {
    auto it = map.find(id);
    if (it == map.end())
        return def;
    else
        return it->second;
}

static unsigned get_locations_consumed_by_type(shader_module const *src, unsigned type, bool strip_array_level) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypePointer:
        // See through the ptr -- this is only ever at the toplevel for graphics shaders; we're never actually passing
        // pointers around.
        return get_locations_consumed_by_type(src, insn.word(3), strip_array_level);
    case spv::OpTypeArray:
        if (strip_array_level) {
            return get_locations_consumed_by_type(src, insn.word(2), false);
        } else {
            return get_constant_value(src, insn.word(3)) * get_locations_consumed_by_type(src, insn.word(2), false);
        }
    case spv::OpTypeMatrix:
        // Num locations is the dimension * element size
        return insn.word(3) * get_locations_consumed_by_type(src, insn.word(2), false);
    case spv::OpTypeVector: {
        auto scalar_type = src->get_def(insn.word(2));
        auto bit_width = (scalar_type.opcode() == spv::OpTypeInt || scalar_type.opcode() == spv::OpTypeFloat) ?
            scalar_type.word(2) : 32;

        // Locations are 128-bit wide; 3- and 4-component vectors of 64-bit types require two.
        return (bit_width * insn.word(3) + 127) / 128;
    }
    default:
        // Everything else is just 1.
        return 1;

        // TODO: extend to handle 64bit scalar types, whose vectors may need multiple locations.
    }
}
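
// Worked examples (following the rules above): a float32 vec4 needs (32*4 + 127)/128 = 1 location; a float64 vec3
// (GLSL dvec3) needs (64*3 + 127)/128 = 2; a mat4 consumes 4 columns * 1 location = 4. Arrays multiply by the
// element count unless strip_array_level peels the outer per-vertex array of tessellation/geometry interfaces.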

static unsigned get_locations_consumed_by_format(VkFormat format) {
    switch (format) {
    case VK_FORMAT_R64G64B64A64_SFLOAT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_R64G64B64_SFLOAT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64_UINT:
        return 2;
    default:
        return 1;
    }
}

typedef std::pair<unsigned, unsigned> location_t;
typedef std::pair<unsigned, unsigned> descriptor_slot_t;

struct interface_var {
    uint32_t id;
    uint32_t type_id;
    uint32_t offset;
    bool is_patch;
    bool is_block_member;
    bool is_relaxed_precision;
    // TODO: collect the name, too? Isn't required to be present.
};

struct shader_stage_attributes {
    char const *const name;
    bool arrayed_input;
    bool arrayed_output;
};

static shader_stage_attributes shader_stage_attribs[] = {
    {"vertex shader", false, false},
    {"tessellation control shader", true, true},
    {"tessellation evaluation shader", true, false},
    {"geometry shader", true, false},
    {"fragment shader", false, false},
};

static spirv_inst_iter get_struct_type(shader_module const *src, spirv_inst_iter def, bool is_array_of_verts) {
    while (true) {

        if (def.opcode() == spv::OpTypePointer) {
            def = src->get_def(def.word(3));
        } else if (def.opcode() == spv::OpTypeArray && is_array_of_verts) {
            def = src->get_def(def.word(2));
            is_array_of_verts = false;
        } else if (def.opcode() == spv::OpTypeStruct) {
            return def;
        } else {
            return src->end();
        }
    }
}

static void collect_interface_block_members(shader_module const *src,
                                            std::map<location_t, interface_var> *out,
                                            std::unordered_map<unsigned, unsigned> const &blocks, bool is_array_of_verts,
                                            uint32_t id, uint32_t type_id, bool is_patch) {
    // Walk down the type_id presented, trying to determine whether it's actually an interface block.
    auto type = get_struct_type(src, src->get_def(type_id), is_array_of_verts && !is_patch);
    if (type == src->end() || blocks.find(type.word(1)) == blocks.end()) {
        // This isn't an interface block.
        return;
    }

    std::unordered_map<unsigned, unsigned> member_components;
    std::unordered_map<unsigned, unsigned> member_relaxed_precision;

    // Walk all the OpMemberDecorate for type's result id -- first pass, collect components.
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);

            if (insn.word(3) == spv::DecorationComponent) {
                unsigned component = insn.word(4);
                member_components[member_index] = component;
            }

            if (insn.word(3) == spv::DecorationRelaxedPrecision) {
                member_relaxed_precision[member_index] = 1;
            }
        }
    }

    // Second pass -- produce the output, from Location decorations
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {
            unsigned member_index = insn.word(2);
            unsigned member_type_id = type.word(2 + member_index);

            if (insn.word(3) == spv::DecorationLocation) {
                unsigned location = insn.word(4);
                unsigned num_locations = get_locations_consumed_by_type(src, member_type_id, false);
                auto component_it = member_components.find(member_index);
                unsigned component = component_it == member_components.end() ? 0 : component_it->second;
                bool is_relaxed_precision = member_relaxed_precision.find(member_index) != member_relaxed_precision.end();

                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    // TODO: member index in interface_var too?
                    v.type_id = member_type_id;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_block_member = true;
                    v.is_relaxed_precision = is_relaxed_precision;
                    (*out)[std::make_pair(location + offset, component)] = v;
                }
            }
        }
    }
}

static std::map<location_t, interface_var> collect_interface_by_location(
    shader_module const *src, spirv_inst_iter entrypoint,
    spv::StorageClass sinterface, bool is_array_of_verts) {

    std::unordered_map<unsigned, unsigned> var_locations;
    std::unordered_map<unsigned, unsigned> var_builtins;
    std::unordered_map<unsigned, unsigned> var_components;
    std::unordered_map<unsigned, unsigned> blocks;
    std::unordered_map<unsigned, unsigned> var_patch;
    std::unordered_map<unsigned, unsigned> var_relaxed_precision;

    for (auto insn : *src) {

        // We consider two interface models: SSO rendezvous-by-location, and builtins. Complain about anything that
        // fits neither model.
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationLocation) {
                var_locations[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBuiltIn) {
                var_builtins[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationComponent) {
                var_components[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBlock) {
                blocks[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationPatch) {
                var_patch[insn.word(1)] = 1;
            }

            if (insn.word(2) == spv::DecorationRelaxedPrecision) {
                var_relaxed_precision[insn.word(1)] = 1;
            }
        }
    }

    // TODO: handle grouped decorations
    // TODO: handle index=1 dual source outputs from FS -- two vars will have the same location, and we DON'T want to clobber.

    // Find the end of the entrypoint's name string. Additional zero bytes follow the actual null terminator, to fill out the
    // rest of the word - so we only need to look at the last byte in the word to determine which word contains the terminator.
    uint32_t word = 3;
    while (entrypoint.word(word) & 0xff000000u) {
        ++word;
    }
    ++word;

    std::map<location_t, interface_var> out;

    for (; word < entrypoint.len(); word++) {
        auto insn = src->get_def(entrypoint.word(word));
        assert(insn != src->end());
        assert(insn.opcode() == spv::OpVariable);

        if (insn.word(3) == static_cast<uint32_t>(sinterface)) {
            unsigned id = insn.word(2);
            unsigned type = insn.word(1);

            int location = value_or_default(var_locations, id, -1);
            int builtin = value_or_default(var_builtins, id, -1);
            unsigned component = value_or_default(var_components, id, 0); // Unspecified is OK, is 0
            bool is_patch = var_patch.find(id) != var_patch.end();
            bool is_relaxed_precision = var_relaxed_precision.find(id) != var_relaxed_precision.end();

            // All variables and interface block members in the Input or Output storage classes must be decorated with either
            // a builtin or an explicit location.
            //
            // TODO: integrate the interface block support here. For now, don't complain -- a valid SPIRV module will only hit
            // this path for the interface block case, as the individual members of the type are decorated, rather than
            // variable declarations.

            if (location != -1) {
                // A user-defined interface variable, with a location. Where a variable occupied multiple locations, emit
                // one result for each.
                unsigned num_locations = get_locations_consumed_by_type(src, type, is_array_of_verts && !is_patch);
                for (unsigned int offset = 0; offset < num_locations; offset++) {
                    interface_var v = {};
                    v.id = id;
                    v.type_id = type;
                    v.offset = offset;
                    v.is_patch = is_patch;
                    v.is_relaxed_precision = is_relaxed_precision;
                    out[std::make_pair(location + offset, component)] = v;
                }
            } else if (builtin == -1) {
                // An interface block instance
                collect_interface_block_members(src, &out, blocks, is_array_of_verts, id, type, is_patch);
            }
        }
    }

    return out;
}
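
// Usage sketch (mirrors the real call sites below): gather a stage's interface keyed by (location, component) --
//     auto outputs = collect_interface_by_location(vs, vs_entrypoint, spv::StorageClassOutput, false);
//     for (auto &slot : outputs) { /* slot.first = {location, component}; slot.second = interface_var */ }
// Because the result is a std::map, iteration is sorted by location, which the matching walks below rely on.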

static std::vector<std::pair<uint32_t, interface_var>> collect_interface_by_input_attachment_index(
    debug_report_data *report_data, shader_module const *src,
    std::unordered_set<uint32_t> const &accessible_ids) {

    std::vector<std::pair<uint32_t, interface_var>> out;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationInputAttachmentIndex) {
                auto attachment_index = insn.word(3);
                auto id = insn.word(1);

                if (accessible_ids.count(id)) {
                    auto def = src->get_def(id);
                    assert(def != src->end());

                    // Check the variable's own storage class (def.word(3)); subpass inputs live in UniformConstant.
                    if (def.opcode() == spv::OpVariable && def.word(3) == spv::StorageClassUniformConstant) {
                        auto num_locations = get_locations_consumed_by_type(src, def.word(1), false);
                        for (unsigned int offset = 0; offset < num_locations; offset++) {
                            interface_var v = {};
                            v.id = id;
                            v.type_id = def.word(1);
                            v.offset = offset;
                            out.emplace_back(attachment_index + offset, v);
                        }
                    }
                }
            }
        }
    }

    return out;
}

static std::vector<std::pair<descriptor_slot_t, interface_var>> collect_interface_by_descriptor_slot(
    debug_report_data *report_data, shader_module const *src,
    std::unordered_set<uint32_t> const &accessible_ids) {

    std::unordered_map<unsigned, unsigned> var_sets;
    std::unordered_map<unsigned, unsigned> var_bindings;

    for (auto insn : *src) {
        // All variables in the Uniform or UniformConstant storage classes are required to be decorated with both
        // DecorationDescriptorSet and DecorationBinding.
        if (insn.opcode() == spv::OpDecorate) {
            if (insn.word(2) == spv::DecorationDescriptorSet) {
                var_sets[insn.word(1)] = insn.word(3);
            }

            if (insn.word(2) == spv::DecorationBinding) {
                var_bindings[insn.word(1)] = insn.word(3);
            }
        }
    }

    std::vector<std::pair<descriptor_slot_t, interface_var>> out;

    for (auto id : accessible_ids) {
        auto insn = src->get_def(id);
        assert(insn != src->end());

        if (insn.opcode() == spv::OpVariable &&
            (insn.word(3) == spv::StorageClassUniform || insn.word(3) == spv::StorageClassUniformConstant)) {
            unsigned set = value_or_default(var_sets, insn.word(2), 0);
            unsigned binding = value_or_default(var_bindings, insn.word(2), 0);

            interface_var v = {};
            v.id = insn.word(2);
            v.type_id = insn.word(1);
            out.emplace_back(std::make_pair(set, binding), v);
        }
    }

    return out;
}
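
// Usage sketch: descriptor-use analysis is driven from the id set produced by mark_accessible_ids (defined below) --
//     auto accessible_ids = mark_accessible_ids(module, entrypoint);
//     auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);
//     // each element pairs a (set, binding) descriptor_slot_t with the interface_var that uses it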

static bool validate_interface_between_stages(debug_report_data *report_data, shader_module const *producer,
                                              spirv_inst_iter producer_entrypoint, shader_stage_attributes const *producer_stage,
                                              shader_module const *consumer, spirv_inst_iter consumer_entrypoint,
                                              shader_stage_attributes const *consumer_stage) {
    bool pass = true;

    auto outputs = collect_interface_by_location(producer, producer_entrypoint, spv::StorageClassOutput, producer_stage->arrayed_output);
    auto inputs = collect_interface_by_location(consumer, consumer_entrypoint, spv::StorageClassInput, consumer_stage->arrayed_input);

    auto a_it = outputs.begin();
    auto b_it = inputs.begin();

    // Maps sorted by key (location); walk them together to find mismatches
    while ((outputs.size() > 0 && a_it != outputs.end()) || (inputs.size() && b_it != inputs.end())) {
        bool a_at_end = outputs.size() == 0 || a_it == outputs.end();
        bool b_at_end = inputs.size() == 0 || b_it == inputs.end();
        auto a_first = a_at_end ? std::make_pair(0u, 0u) : a_it->first;
        auto b_first = b_at_end ? std::make_pair(0u, 0u) : b_it->first;

        if (b_at_end || ((!a_at_end) && (a_first < b_first))) {
            if (log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "%s writes to output location %u.%u which is not consumed by %s", producer_stage->name, a_first.first,
                        a_first.second, consumer_stage->name)) {
                pass = false;
            }
            a_it++;
        } else if (a_at_end || a_first > b_first) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC",
                        "%s consumes input location %u.%u which is not written by %s", consumer_stage->name, b_first.first, b_first.second,
                        producer_stage->name)) {
                pass = false;
            }
            b_it++;
        } else {
            // Subtleties of arrayed interfaces:
            // - if is_patch, then the member is not arrayed, even though the interface may be.
            // - if is_block_member, then the extra array level of an arrayed interface is not
            //   expressed in the member type -- it's expressed in the block type.
            if (!types_match(producer, consumer, a_it->second.type_id, b_it->second.type_id,
                             producer_stage->arrayed_output && !a_it->second.is_patch && !a_it->second.is_block_member,
                             consumer_stage->arrayed_input && !b_it->second.is_patch && !b_it->second.is_block_member,
                             true)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC", "Type mismatch on location %u.%u: '%s' vs '%s'",
                            a_first.first, a_first.second,
                            describe_type(producer, a_it->second.type_id).c_str(),
                            describe_type(consumer, b_it->second.type_id).c_str())) {
                    pass = false;
                }
            }
            if (a_it->second.is_patch != b_it->second.is_patch) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: is per-%s in %s stage but "
                            "per-%s in %s stage", a_first.first, a_first.second,
                            a_it->second.is_patch ? "patch" : "vertex", producer_stage->name,
                            b_it->second.is_patch ? "patch" : "vertex", consumer_stage->name)) {
                    pass = false;
                }
            }
            if (a_it->second.is_relaxed_precision != b_it->second.is_relaxed_precision) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Decoration mismatch on location %u.%u: %s and %s stages differ in precision",
                            a_first.first, a_first.second,
                            producer_stage->name,
                            consumer_stage->name)) {
                    pass = false;
                }
            }
            a_it++;
            b_it++;
        }
    }

    return pass;
}
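
// Example diagnostics from the walk above (hypothetical shaders): a vertex shader writing location 1 that the
// fragment shader never reads produces the performance warning "vertex shader writes to output location 1.0 which
// is not consumed by fragment shader"; an input with no producer is an error, as are type, patch, and
// relaxed-precision mismatches at a shared location.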

enum FORMAT_TYPE {
    FORMAT_TYPE_UNDEFINED,
    FORMAT_TYPE_FLOAT, // UNORM, SNORM, FLOAT, USCALED, SSCALED, SRGB -- anything we consider float in the shader
    FORMAT_TYPE_SINT,
    FORMAT_TYPE_UINT,
};

static unsigned get_format_type(VkFormat fmt) {
    switch (fmt) {
    case VK_FORMAT_UNDEFINED:
        return FORMAT_TYPE_UNDEFINED;
    case VK_FORMAT_R8_SINT:
    case VK_FORMAT_R8G8_SINT:
    case VK_FORMAT_R8G8B8_SINT:
    case VK_FORMAT_R8G8B8A8_SINT:
    case VK_FORMAT_R16_SINT:
    case VK_FORMAT_R16G16_SINT:
    case VK_FORMAT_R16G16B16_SINT:
    case VK_FORMAT_R16G16B16A16_SINT:
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64G64_SINT:
    case VK_FORMAT_R64G64B64_SINT:
    case VK_FORMAT_R64G64B64A64_SINT:
    case VK_FORMAT_B8G8R8_SINT:
    case VK_FORMAT_B8G8R8A8_SINT:
    case VK_FORMAT_A8B8G8R8_SINT_PACK32:
    case VK_FORMAT_A2B10G10R10_SINT_PACK32:
    case VK_FORMAT_A2R10G10B10_SINT_PACK32:
        return FORMAT_TYPE_SINT;
    case VK_FORMAT_R8_UINT:
    case VK_FORMAT_R8G8_UINT:
    case VK_FORMAT_R8G8B8_UINT:
    case VK_FORMAT_R8G8B8A8_UINT:
    case VK_FORMAT_R16_UINT:
    case VK_FORMAT_R16G16_UINT:
    case VK_FORMAT_R16G16B16_UINT:
    case VK_FORMAT_R16G16B16A16_UINT:
    case VK_FORMAT_R32_UINT:
    case VK_FORMAT_R32G32_UINT:
    case VK_FORMAT_R32G32B32_UINT:
    case VK_FORMAT_R32G32B32A32_UINT:
    case VK_FORMAT_R64_UINT:
    case VK_FORMAT_R64G64_UINT:
    case VK_FORMAT_R64G64B64_UINT:
    case VK_FORMAT_R64G64B64A64_UINT:
    case VK_FORMAT_B8G8R8_UINT:
    case VK_FORMAT_B8G8R8A8_UINT:
    case VK_FORMAT_A8B8G8R8_UINT_PACK32:
    case VK_FORMAT_A2B10G10R10_UINT_PACK32:
    case VK_FORMAT_A2R10G10B10_UINT_PACK32:
        return FORMAT_TYPE_UINT;
    default:
        return FORMAT_TYPE_FLOAT;
    }
}
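
// Examples (per the cases above): VK_FORMAT_R32G32B32A32_SINT maps to FORMAT_TYPE_SINT and VK_FORMAT_R8G8B8A8_UINT
// to FORMAT_TYPE_UINT, while UNORM/SNORM/SRGB/SCALED/SFLOAT formats all fall through to FORMAT_TYPE_FLOAT, since
// they present as floating point on the shader side.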

// Characterizes a SPIR-V type appearing in an interface to a FF stage, for comparison to a VkFormat's characterization above.
static unsigned get_fundamental_type(shader_module const *src, unsigned type) {
    auto insn = src->get_def(type);
    assert(insn != src->end());

    switch (insn.opcode()) {
    case spv::OpTypeInt:
        return insn.word(3) ? FORMAT_TYPE_SINT : FORMAT_TYPE_UINT;
    case spv::OpTypeFloat:
        return FORMAT_TYPE_FLOAT;
    case spv::OpTypeVector:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeMatrix:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypeArray:
        return get_fundamental_type(src, insn.word(2));
    case spv::OpTypePointer:
        return get_fundamental_type(src, insn.word(3));
    case spv::OpTypeImage:
        return get_fundamental_type(src, insn.word(2));

    default:
        return FORMAT_TYPE_UNDEFINED;
    }
}

static uint32_t get_shader_stage_id(VkShaderStageFlagBits stage) {
    uint32_t bit_pos = u_ffs(stage);
    return bit_pos - 1;
}
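
// Example, assuming u_ffs is the usual 1-based find-first-set helper: VK_SHADER_STAGE_FRAGMENT_BIT is 0x10, so
// u_ffs returns 5 and the stage id is 4 -- a compact index matching the layout of shader_stage_attribs above.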

static bool validate_vi_consistency(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi) {
    // Walk the binding descriptions, which describe the step rate and stride of each vertex buffer. Each binding should
    // be specified only once.
    std::unordered_map<uint32_t, VkVertexInputBindingDescription const *> bindings;
    bool pass = true;

    for (unsigned i = 0; i < vi->vertexBindingDescriptionCount; i++) {
        auto desc = &vi->pVertexBindingDescriptions[i];
        auto &binding = bindings[desc->binding];
        if (binding) {
            // TODO: VALIDATION_ERROR_02105 perhaps?
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INCONSISTENT_VI, "SC",
                        "Duplicate vertex input binding descriptions for binding %d", desc->binding)) {
                pass = false;
            }
        } else {
            binding = desc;
        }
    }

    return pass;
}

static bool validate_vi_against_vs_inputs(debug_report_data *report_data, VkPipelineVertexInputStateCreateInfo const *vi,
                                          shader_module const *vs, spirv_inst_iter entrypoint) {
    bool pass = true;

    auto inputs = collect_interface_by_location(vs, entrypoint, spv::StorageClassInput, false);

    // Build index by location
    std::map<uint32_t, VkVertexInputAttributeDescription const *> attribs;
    if (vi) {
        for (unsigned i = 0; i < vi->vertexAttributeDescriptionCount; i++) {
            auto num_locations = get_locations_consumed_by_format(vi->pVertexAttributeDescriptions[i].format);
            for (auto j = 0u; j < num_locations; j++) {
                attribs[vi->pVertexAttributeDescriptions[i].location + j] = &vi->pVertexAttributeDescriptions[i];
            }
        }
    }

    auto it_a = attribs.begin();
    auto it_b = inputs.begin();
    bool used = false;

    while ((attribs.size() > 0 && it_a != attribs.end()) || (inputs.size() > 0 && it_b != inputs.end())) {
        bool a_at_end = attribs.size() == 0 || it_a == attribs.end();
        bool b_at_end = inputs.size() == 0 || it_b == inputs.end();
        auto a_first = a_at_end ? 0 : it_a->first;
        auto b_first = b_at_end ? 0 : it_b->first.first;
        if (!a_at_end && (b_at_end || a_first < b_first)) {
            if (!used && log_msg(report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                 __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                                 "Vertex attribute at location %d not consumed by vertex shader", a_first)) {
                pass = false;
            }
            used = false;
            it_a++;
        } else if (!b_at_end && (a_at_end || b_first < a_first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Vertex shader consumes input at location %d but not provided",
                        b_first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned attrib_type = get_format_type(it_a->second->format);
            unsigned input_type = get_fundamental_type(vs, it_b->second.type_id);

            // Type checking
            if (attrib_type != FORMAT_TYPE_UNDEFINED && input_type != FORMAT_TYPE_UNDEFINED && attrib_type != input_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attribute type of `%s` at location %d does not match vertex shader input type of `%s`",
                            string_VkFormat(it_a->second->format), a_first,
                            describe_type(vs, it_b->second.type_id).c_str())) {
                    pass = false;
                }
            }

            // OK!
            used = true;
            it_b++;
        }
    }

    return pass;
}
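
// Example diagnostics from the walk above: an attribute description at a location no shader input consumes yields
// a performance warning; a vertex shader input at a location with no attribute is an error; and a *_SFLOAT
// attribute feeding an ivec4 input trips SHADER_CHECKER_INTERFACE_TYPE_MISMATCH (float vs sint).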

static bool validate_fs_outputs_against_render_pass(debug_report_data *report_data, shader_module const *fs,
                                                    spirv_inst_iter entrypoint, VkRenderPassCreateInfo const *rpci,
                                                    uint32_t subpass_index) {
    std::map<uint32_t, VkFormat> color_attachments;
    auto subpass = rpci->pSubpasses[subpass_index];
    for (auto i = 0u; i < subpass.colorAttachmentCount; ++i) {
        uint32_t attachment = subpass.pColorAttachments[i].attachment;
        if (attachment == VK_ATTACHMENT_UNUSED)
            continue;
        if (rpci->pAttachments[attachment].format != VK_FORMAT_UNDEFINED) {
            color_attachments[i] = rpci->pAttachments[attachment].format;
        }
    }

    bool pass = true;

    // TODO: dual source blend index (spv::DecorationIndex, zero if not provided)

    auto outputs = collect_interface_by_location(fs, entrypoint, spv::StorageClassOutput, false);

    auto it_a = outputs.begin();
    auto it_b = color_attachments.begin();

    // Walk attachment list and outputs together

    while ((outputs.size() > 0 && it_a != outputs.end()) || (color_attachments.size() > 0 && it_b != color_attachments.end())) {
        bool a_at_end = outputs.size() == 0 || it_a == outputs.end();
        bool b_at_end = color_attachments.size() == 0 || it_b == color_attachments.end();

        if (!a_at_end && (b_at_end || it_a->first.first < it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_OUTPUT_NOT_CONSUMED, "SC",
                        "fragment shader writes to output location %d with no matching attachment", it_a->first.first)) {
                pass = false;
            }
            it_a++;
        } else if (!b_at_end && (a_at_end || it_a->first.first > it_b->first)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_INPUT_NOT_PRODUCED, "SC", "Attachment %d not written by fragment shader",
                        it_b->first)) {
                pass = false;
            }
            it_b++;
        } else {
            unsigned output_type = get_fundamental_type(fs, it_a->second.type_id);
            unsigned att_type = get_format_type(it_b->second);

            // Type checking
            if (att_type != FORMAT_TYPE_UNDEFINED && output_type != FORMAT_TYPE_UNDEFINED && att_type != output_type) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_INTERFACE_TYPE_MISMATCH, "SC",
                            "Attachment %d of type `%s` does not match fragment shader output type of `%s`", it_b->first,
                            string_VkFormat(it_b->second),
                            describe_type(fs, it_a->second.type_id).c_str())) {
                    pass = false;
                }
            }

            // OK!
            it_a++;
            it_b++;
        }
    }

    return pass;
}
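
// Example: a fragment shader that writes only location 0 against a subpass with two color attachments produces
// "Attachment 1 not written by fragment shader"; writing a uvec4 to a UNORM attachment is a type mismatch, since
// the attachment characterizes as FORMAT_TYPE_FLOAT but the output as FORMAT_TYPE_UINT.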

// For some analyses, we need to know about all ids referenced by the static call tree of a particular entrypoint. This is
// important for identifying the set of shader resources actually used by an entrypoint, for example.
// Note: we only explore parts of the module which might actually contain ids we care about for the above analyses.
//  - NOT the shader input/output interfaces.
//
// TODO: The set of interesting opcodes here was determined by eyeballing the SPIRV spec. It might be worth
// converting parts of this to be generated from the machine-readable spec instead.
static std::unordered_set<uint32_t> mark_accessible_ids(shader_module const *src, spirv_inst_iter entrypoint) {
    std::unordered_set<uint32_t> ids;
    std::unordered_set<uint32_t> worklist;
    worklist.insert(entrypoint.word(2));

    while (!worklist.empty()) {
        auto id_iter = worklist.begin();
        auto id = *id_iter;
        worklist.erase(id_iter);

        auto insn = src->get_def(id);
        if (insn == src->end()) {
            // ID is something we didn't collect in build_def_index. That's OK -- we'll stumble across all kinds of things here
            // that we may not care about.
            continue;
        }

        // Try to add to the output set
        if (!ids.insert(id).second) {
            continue; // If we already saw this id, we don't want to walk it again.
        }

        switch (insn.opcode()) {
        case spv::OpFunction:
            // Scan whole body of the function, enlisting anything interesting
            while (++insn, insn.opcode() != spv::OpFunctionEnd) {
                switch (insn.opcode()) {
                case spv::OpLoad:
                case spv::OpAtomicLoad:
                case spv::OpAtomicExchange:
                case spv::OpAtomicCompareExchange:
                case spv::OpAtomicCompareExchangeWeak:
                case spv::OpAtomicIIncrement:
                case spv::OpAtomicIDecrement:
                case spv::OpAtomicIAdd:
                case spv::OpAtomicISub:
                case spv::OpAtomicSMin:
                case spv::OpAtomicUMin:
                case spv::OpAtomicSMax:
                case spv::OpAtomicUMax:
                case spv::OpAtomicAnd:
                case spv::OpAtomicOr:
                case spv::OpAtomicXor:
                    worklist.insert(insn.word(3)); // ptr
                    break;
                case spv::OpStore:
                case spv::OpAtomicStore:
                    worklist.insert(insn.word(1)); // ptr
                    break;
                case spv::OpAccessChain:
                case spv::OpInBoundsAccessChain:
                    worklist.insert(insn.word(3)); // base ptr
                    break;
                case spv::OpSampledImage:
                case spv::OpImageSampleImplicitLod:
                case spv::OpImageSampleExplicitLod:
                case spv::OpImageSampleDrefImplicitLod:
                case spv::OpImageSampleDrefExplicitLod:
                case spv::OpImageSampleProjImplicitLod:
                case spv::OpImageSampleProjExplicitLod:
                case spv::OpImageSampleProjDrefImplicitLod:
                case spv::OpImageSampleProjDrefExplicitLod:
                case spv::OpImageFetch:
                case spv::OpImageGather:
                case spv::OpImageDrefGather:
                case spv::OpImageRead:
                case spv::OpImage:
                case spv::OpImageQueryFormat:
                case spv::OpImageQueryOrder:
                case spv::OpImageQuerySizeLod:
                case spv::OpImageQuerySize:
                case spv::OpImageQueryLod:
                case spv::OpImageQueryLevels:
                case spv::OpImageQuerySamples:
                case spv::OpImageSparseSampleImplicitLod:
                case spv::OpImageSparseSampleExplicitLod:
                case spv::OpImageSparseSampleDrefImplicitLod:
                case spv::OpImageSparseSampleDrefExplicitLod:
                case spv::OpImageSparseSampleProjImplicitLod:
                case spv::OpImageSparseSampleProjExplicitLod:
                case spv::OpImageSparseSampleProjDrefImplicitLod:
                case spv::OpImageSparseSampleProjDrefExplicitLod:
                case spv::OpImageSparseFetch:
                case spv::OpImageSparseGather:
                case spv::OpImageSparseDrefGather:
                case spv::OpImageTexelPointer:
                    worklist.insert(insn.word(3)); // Image or sampled image
                    break;
                case spv::OpImageWrite:
                    worklist.insert(insn.word(1)); // Image -- different operand order to above
                    break;
                case spv::OpFunctionCall:
                    for (uint32_t i = 3; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); // fn itself, and all args
                    }
                    break;

                case spv::OpExtInst:
                    for (uint32_t i = 5; i < insn.len(); i++) {
                        worklist.insert(insn.word(i)); // Operands to ext inst
                    }
                    break;
                }
            }
            break;
        }
    }

    return ids;
}

static bool validate_push_constant_block_against_pipeline(debug_report_data *report_data,
                                                          std::vector<VkPushConstantRange> const *push_constant_ranges,
                                                          shader_module const *src, spirv_inst_iter type,
                                                          VkShaderStageFlagBits stage) {
    bool pass = true;

    // Strip off ptrs etc
    type = get_struct_type(src, type, false);
    assert(type != src->end());

    // Validate directly off the offsets. This isn't quite correct for arrays and matrices, but is a good first step.
    // TODO: arrays, matrices, weird sizes
    for (auto insn : *src) {
        if (insn.opcode() == spv::OpMemberDecorate && insn.word(1) == type.word(1)) {

            if (insn.word(3) == spv::DecorationOffset) {
                unsigned offset = insn.word(4);
                auto size = 4; // Bytes; TODO: calculate this based on the type

                bool found_range = false;
                for (auto const &range : *push_constant_ranges) {
                    if (range.offset <= offset && range.offset + range.size >= offset + size) {
                        found_range = true;

                        if ((range.stageFlags & stage) == 0) {
                            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                        __LINE__, SHADER_CHECKER_PUSH_CONSTANT_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                                        "Push constant range covering variable starting at "
                                        "offset %u not accessible from stage %s",
                                        offset, string_VkShaderStageFlagBits(stage))) {
                                pass = false;
                            }
                        }

                        break;
                    }
                }

                if (!found_range) {
                    if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                __LINE__, SHADER_CHECKER_PUSH_CONSTANT_OUT_OF_RANGE, "SC",
                                "Push constant range covering variable starting at "
                                "offset %u not declared in layout",
                                offset)) {
                        pass = false;
                    }
                }
            }
        }
    }

    return pass;
}
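
// Example: a push constant member decorated with Offset 16 passes only if some
// VkPushConstantRange in the layout satisfies offset <= 16 && offset + size >= 20
// (the member is assumed to be 4 bytes, per the TODO above) and includes the
// shader's stage in its stageFlags.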

static bool validate_push_constant_usage(debug_report_data *report_data,
                                         std::vector<VkPushConstantRange> const *push_constant_ranges, shader_module const *src,
                                         std::unordered_set<uint32_t> accessible_ids, VkShaderStageFlagBits stage) {
    bool pass = true;

    for (auto id : accessible_ids) {
        auto def_insn = src->get_def(id);
        if (def_insn.opcode() == spv::OpVariable && def_insn.word(3) == spv::StorageClassPushConstant) {
            pass &= validate_push_constant_block_against_pipeline(report_data, push_constant_ranges, src,
                                                                  src->get_def(def_insn.word(1)), stage);
        }
    }

    return pass;
}

// For the given pipelineLayout, verify that the set_layout_node at slot.first
// has the requested binding at slot.second and return a ptr to that binding
static VkDescriptorSetLayoutBinding const *get_descriptor_binding(PIPELINE_LAYOUT_NODE const *pipelineLayout, descriptor_slot_t slot) {

    if (!pipelineLayout)
        return nullptr;

    if (slot.first >= pipelineLayout->set_layouts.size())
        return nullptr;

    return pipelineLayout->set_layouts[slot.first]->GetDescriptorSetLayoutBindingPtrFromBinding(slot.second);
}
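
// A descriptor_slot_t is a (set, binding) pair: slot.first selects the descriptor set
// layout within the pipeline layout, slot.second the binding number within that set layout.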

// Block of code at start here for managing/tracking Pipeline state that this layer cares about

// TODO : Should be tracking lastBound per commandBuffer and when draws occur, report based on that cmd buffer lastBound
//  Then need to synchronize the accesses based on cmd buffer so that if I'm reading state on one cmd buffer, updates
//  to that same cmd buffer by separate thread are not changing state from underneath us
// Track the last cmd buffer touched by this thread

static bool hasDrawCmd(GLOBAL_CB_NODE *pCB) {
    for (uint32_t i = 0; i < NUM_DRAW_TYPES; i++) {
        if (pCB->drawCount[i])
            return true;
    }
    return false;
}

// Check object status for selected flag state
static bool validate_status(layer_data *my_data, GLOBAL_CB_NODE *pNode, CBStatusFlags status_mask, VkFlags msg_flags,
                            const char *fail_msg, UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    if (!(pNode->status & status_mask)) {
        char const *const message = validation_error_map[msg_code];
        return log_msg(my_data->report_data, msg_flags, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                       reinterpret_cast<const uint64_t &>(pNode->commandBuffer), __LINE__, msg_code, "DS",
                       "command buffer object 0x%p: %s. %s.", pNode->commandBuffer, fail_msg, message);
    }
    return false;
}
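
// Typical usage (see validate_draw_state_flags below): returns true -- meaning an error
// was logged -- when the requested status bit was never set on the command buffer, e.g.
//   validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
//                   "Dynamic line width state not set for this command buffer", msg_code);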

// Retrieve pipeline node ptr for given pipeline object
static PIPELINE_STATE *getPipelineState(layer_data const *my_data, VkPipeline pipeline) {
    auto it = my_data->pipelineMap.find(pipeline);
    if (it == my_data->pipelineMap.end()) {
        return nullptr;
    }
    return it->second;
}

static RENDER_PASS_STATE *getRenderPassState(layer_data const *my_data, VkRenderPass renderpass) {
    auto it = my_data->renderPassMap.find(renderpass);
    if (it == my_data->renderPassMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

static FRAMEBUFFER_STATE *getFramebufferState(const layer_data *my_data, VkFramebuffer framebuffer) {
    auto it = my_data->frameBufferMap.find(framebuffer);
    if (it == my_data->frameBufferMap.end()) {
        return nullptr;
    }
    return it->second.get();
}

cvdescriptorset::DescriptorSetLayout const *getDescriptorSetLayout(layer_data const *my_data, VkDescriptorSetLayout dsLayout) {
    auto it = my_data->descriptorSetLayoutMap.find(dsLayout);
    if (it == my_data->descriptorSetLayoutMap.end()) {
        return nullptr;
    }
    return it->second;
}

static PIPELINE_LAYOUT_NODE const *getPipelineLayout(layer_data const *my_data, VkPipelineLayout pipeLayout) {
    auto it = my_data->pipelineLayoutMap.find(pipeLayout);
    if (it == my_data->pipelineLayoutMap.end()) {
        return nullptr;
    }
    return &it->second;
}

// Return true if for a given PSO, the given state enum is dynamic, else return false
static bool isDynamic(const PIPELINE_STATE *pPipeline, const VkDynamicState state) {
    if (pPipeline && pPipeline->graphicsPipelineCI.pDynamicState) {
        for (uint32_t i = 0; i < pPipeline->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            if (state == pPipeline->graphicsPipelineCI.pDynamicState->pDynamicStates[i])
                return true;
        }
    }
    return false;
}
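
// Example: for a PSO created with pDynamicStates = {VK_DYNAMIC_STATE_VIEWPORT,
// VK_DYNAMIC_STATE_SCISSOR}, isDynamic(pso, VK_DYNAMIC_STATE_VIEWPORT) is true while
// isDynamic(pso, VK_DYNAMIC_STATE_LINE_WIDTH) is false.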

// Validate state stored as flags at time of draw call
static bool validate_draw_state_flags(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe, bool indexed,
                                      UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    if (pPipe->graphicsPipelineCI.pInputAssemblyState &&
        ((pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_LIST) ||
         (pPipe->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_LINE_STRIP))) {
        result |= validate_status(dev_data, pCB, CBSTATUS_LINE_WIDTH_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic line width state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pRasterizationState &&
        (pPipe->graphicsPipelineCI.pRasterizationState->depthBiasEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BIAS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bias state not set for this command buffer", msg_code);
    }
    if (pPipe->blendConstantsEnabled) {
        result |= validate_status(dev_data, pCB, CBSTATUS_BLEND_CONSTANTS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic blend constants state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->depthBoundsTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_DEPTH_BOUNDS_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic depth bounds state not set for this command buffer", msg_code);
    }
    if (pPipe->graphicsPipelineCI.pDepthStencilState &&
        (pPipe->graphicsPipelineCI.pDepthStencilState->stencilTestEnable == VK_TRUE)) {
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_READ_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil read mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_WRITE_MASK_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil write mask state not set for this command buffer", msg_code);
        result |= validate_status(dev_data, pCB, CBSTATUS_STENCIL_REFERENCE_SET, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Dynamic stencil reference state not set for this command buffer", msg_code);
    }
    if (indexed) {
        result |= validate_status(dev_data, pCB, CBSTATUS_INDEX_BUFFER_BOUND, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                  "Index buffer object not bound to this command buffer when Indexed Draw attempted", msg_code);
    }

    return result;
}

// Verify attachment reference compatibility according to spec
//  If one array is larger, treat missing elements of shorter array as VK_ATTACHMENT_UNUSED & other array must match this
//  If both AttachmentReference arrays have requested index, check their corresponding AttachmentDescriptions
//  to make sure that format and samples counts match.
//  If not, they are not compatible.
static bool attachment_references_compatible(const uint32_t index, const VkAttachmentReference *pPrimary,
                                             const uint32_t primaryCount, const VkAttachmentDescription *pPrimaryAttachments,
                                             const VkAttachmentReference *pSecondary, const uint32_t secondaryCount,
                                             const VkAttachmentDescription *pSecondaryAttachments) {
    // Check potential NULL cases first to avoid nullptr issues later
    if (pPrimary == nullptr) {
        if (pSecondary == nullptr) {
            return true;
        }
        return false;
    } else if (pSecondary == nullptr) {
        return false;
    }
    if (index >= primaryCount) { // Check secondary as if primary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pSecondary[index].attachment)
            return true;
    } else if (index >= secondaryCount) { // Check primary as if secondary is VK_ATTACHMENT_UNUSED
        if (VK_ATTACHMENT_UNUSED == pPrimary[index].attachment)
            return true;
    } else { // Format and sample count must match
        if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) && (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return true;
        } else if ((pPrimary[index].attachment == VK_ATTACHMENT_UNUSED) || (pSecondary[index].attachment == VK_ATTACHMENT_UNUSED)) {
            return false;
        }
        if ((pPrimaryAttachments[pPrimary[index].attachment].format ==
             pSecondaryAttachments[pSecondary[index].attachment].format) &&
            (pPrimaryAttachments[pPrimary[index].attachment].samples ==
             pSecondaryAttachments[pSecondary[index].attachment].samples))
            return true;
    }
    // Format and sample counts didn't match
    return false;
}
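
// Example: with primaryCount == 2 and secondaryCount == 1, index 1 is compatible only
// if pPrimary[1].attachment == VK_ATTACHMENT_UNUSED; when both arrays contain index 1,
// the referenced VkAttachmentDescriptions must agree on both format and samples.
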
// TODO : Scrub verify_renderpass_compatibility() and validateRenderPassCompatibility() and unify them and/or share code
// For given primary RenderPass object and secondary RenderPassCreateInfo, verify that they're compatible
static bool verify_renderpass_compatibility(const layer_data *my_data, const VkRenderPassCreateInfo *primaryRPCI,
                                            const VkRenderPassCreateInfo *secondaryRPCI, string &errorMsg) {
    if (primaryRPCI->subpassCount != secondaryRPCI->subpassCount) {
        stringstream errorStr;
        errorStr << "RenderPass for primary cmdBuffer has " << primaryRPCI->subpassCount
                 << " subpasses but renderPass for secondary cmdBuffer has " << secondaryRPCI->subpassCount << " subpasses.";
        errorMsg = errorStr.str();
        return false;
    }
    uint32_t spIndex = 0;
    for (spIndex = 0; spIndex < primaryRPCI->subpassCount; ++spIndex) {
        // For each subpass, verify that corresponding color, input, resolve & depth/stencil attachment references are compatible
        uint32_t primaryColorCount = primaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t secondaryColorCount = secondaryRPCI->pSubpasses[spIndex].colorAttachmentCount;
        uint32_t colorMax = std::max(primaryColorCount, secondaryColorCount);
        for (uint32_t cIdx = 0; cIdx < colorMax; ++cIdx) {
            if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pColorAttachments, primaryColorCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pColorAttachments,
                                                  secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "color attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            } else if (!attachment_references_compatible(cIdx, primaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         primaryColorCount, primaryRPCI->pAttachments,
                                                         secondaryRPCI->pSubpasses[spIndex].pResolveAttachments,
                                                         secondaryColorCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "resolve attachments at index " << cIdx << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }

        if (!attachment_references_compatible(0, primaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, primaryRPCI->pAttachments,
                                              secondaryRPCI->pSubpasses[spIndex].pDepthStencilAttachment,
                                              1, secondaryRPCI->pAttachments)) {
            stringstream errorStr;
            errorStr << "depth/stencil attachments of subpass index " << spIndex << " are not compatible.";
            errorMsg = errorStr.str();
            return false;
        }

        uint32_t primaryInputCount = primaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t secondaryInputCount = secondaryRPCI->pSubpasses[spIndex].inputAttachmentCount;
        uint32_t inputMax = std::max(primaryInputCount, secondaryInputCount);
        for (uint32_t i = 0; i < inputMax; ++i) {
            // Note: the input attachment counts must be passed here (the original code passed the color
            // counts, which mis-sized the comparison whenever the two counts differed).
            if (!attachment_references_compatible(i, primaryRPCI->pSubpasses[spIndex].pInputAttachments, primaryInputCount,
                                                  primaryRPCI->pAttachments, secondaryRPCI->pSubpasses[spIndex].pInputAttachments,
                                                  secondaryInputCount, secondaryRPCI->pAttachments)) {
                stringstream errorStr;
                errorStr << "input attachments at index " << i << " of subpass index " << spIndex << " are not compatible.";
                errorMsg = errorStr.str();
                return false;
            }
        }
    }
    return true;
}

// For given cvdescriptorset::DescriptorSet, verify that its Set is compatible w/ the setLayout corresponding to
// pipelineLayout[layoutIndex]
static bool verify_set_layout_compatibility(layer_data *my_data, const cvdescriptorset::DescriptorSet *descriptor_set,
                                            PIPELINE_LAYOUT_NODE const *pipeline_layout, const uint32_t layoutIndex,
                                            string &errorMsg) {
    auto num_sets = pipeline_layout->set_layouts.size();
    if (layoutIndex >= num_sets) {
        stringstream errorStr;
        errorStr << "VkPipelineLayout (" << pipeline_layout->layout << ") only contains " << num_sets
                 << " setLayouts corresponding to sets 0-" << num_sets - 1 << ", but you're attempting to bind set to index "
                 << layoutIndex;
        errorMsg = errorStr.str();
        return false;
    }
    auto layout_node = pipeline_layout->set_layouts[layoutIndex];
    return descriptor_set->IsCompatible(layout_node, &errorMsg);
}

// Validate that data for each specialization entry is fully contained within the buffer.
static bool validate_specialization_offsets(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *info) {
    bool pass = true;

    VkSpecializationInfo const *spec = info->pSpecializationInfo;

    if (spec) {
        for (auto i = 0u; i < spec->mapEntryCount; i++) {
            // TODO: This is a good place for VALIDATION_ERROR_00589.
            if (spec->pMapEntries[i].offset + spec->pMapEntries[i].size > spec->dataSize) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, 0, __LINE__,
                            VALIDATION_ERROR_00590, "SC",
                            "Specialization entry %u (for constant id %u) references memory outside provided "
                            "specialization data (bytes %u.." PRINTF_SIZE_T_SPECIFIER "; " PRINTF_SIZE_T_SPECIFIER
                            " bytes provided). %s.",
                            i, spec->pMapEntries[i].constantID, spec->pMapEntries[i].offset,
                            spec->pMapEntries[i].offset + spec->pMapEntries[i].size - 1, spec->dataSize,
                            validation_error_map[VALIDATION_ERROR_00590])) {

                    pass = false;
                }
            }
        }
    }

    return pass;
}
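
// Example: a map entry with offset == 8 and size == 8 needs dataSize >= 16; with
// dataSize == 12 the entry would read past the end of the provided data, so
// VALIDATION_ERROR_00590 is reported.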

static bool descriptor_type_match(shader_module const *module, uint32_t type_id,
                                  VkDescriptorType descriptor_type, unsigned &descriptor_count) {
    auto type = module->get_def(type_id);

    descriptor_count = 1;

    // Strip off any array or ptrs. Where we remove array levels, adjust the descriptor count for each dimension.
    while (type.opcode() == spv::OpTypeArray || type.opcode() == spv::OpTypePointer) {
        if (type.opcode() == spv::OpTypeArray) {
            descriptor_count *= get_constant_value(module, type.word(3));
            type = module->get_def(type.word(2));
        } else {
            type = module->get_def(type.word(3));
        }
    }

    switch (type.opcode()) {
    case spv::OpTypeStruct: {
        for (auto insn : *module) {
            if (insn.opcode() == spv::OpDecorate && insn.word(1) == type.word(1)) {
                if (insn.word(2) == spv::DecorationBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
                } else if (insn.word(2) == spv::DecorationBufferBlock) {
                    return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                           descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
                }
            }
        }

        // Invalid
        return false;
    }

    case spv::OpTypeSampler:
        return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLER ||
               descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeSampledImage:
        if (descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) {
            // Slight relaxation for some GLSL historical madness: samplerBuffer doesn't really have a sampler, and a texel
            // buffer descriptor doesn't really provide one. Allow this slight mismatch.
            auto image_type = module->get_def(type.word(2));
            auto dim = image_type.word(3);
            auto sampled = image_type.word(7);
            return dim == spv::DimBuffer && sampled == 1;
        }
        return descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;

    case spv::OpTypeImage: {
        // Many descriptor types backing image types -- depends on dimension and whether the image will be used with a sampler.
        // SPIRV for Vulkan requires that sampled be 1 or 2 -- leaving the decision to runtime is unacceptable.
        auto dim = type.word(3);
        auto sampled = type.word(7);

        if (dim == spv::DimSubpassData) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
        } else if (dim == spv::DimBuffer) {
            if (sampled == 1) {
                return descriptor_type == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
            } else {
                return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
            }
        } else if (sampled == 1) {
            return descriptor_type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE ||
                   descriptor_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        } else {
            return descriptor_type == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        }
    }

    // We shouldn't really see any other junk types -- but if we do, they're a mismatch.
    default:
        return false; // Mismatch
    }
}
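
// Example: a GLSL `sampler2D` appears as OpTypeSampledImage and matches
// VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; declared as `sampler2D s[4]`, the
// OpTypeArray wrapper is stripped above and descriptor_count comes back as 4.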

static bool require_feature(debug_report_data *report_data, VkBool32 feature, char const *feature_name) {
    if (!feature) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                    __LINE__, SHADER_CHECKER_FEATURE_NOT_ENABLED, "SC",
                    "Shader requires VkPhysicalDeviceFeatures::%s but is not "
                    "enabled on the device",
                    feature_name)) {
            return false;
        }
    }

    return true;
}

static bool validate_shader_capabilities(debug_report_data *report_data, shader_module const *src,
                                         VkPhysicalDeviceFeatures const *enabledFeatures) {
    bool pass = true;

    for (auto insn : *src) {
        if (insn.opcode() == spv::OpCapability) {
            switch (insn.word(1)) {
            case spv::CapabilityMatrix:
            case spv::CapabilityShader:
            case spv::CapabilityInputAttachment:
            case spv::CapabilitySampled1D:
            case spv::CapabilityImage1D:
            case spv::CapabilitySampledBuffer:
            case spv::CapabilityImageBuffer:
            case spv::CapabilityImageQuery:
            case spv::CapabilityDerivativeControl:
                // Always supported by a Vulkan 1.0 implementation -- no feature bits.
                break;

            case spv::CapabilityGeometry:
                pass &= require_feature(report_data, enabledFeatures->geometryShader, "geometryShader");
                break;

            case spv::CapabilityTessellation:
                pass &= require_feature(report_data, enabledFeatures->tessellationShader, "tessellationShader");
                break;

            case spv::CapabilityFloat64:
                pass &= require_feature(report_data, enabledFeatures->shaderFloat64, "shaderFloat64");
                break;

            case spv::CapabilityInt64:
                pass &= require_feature(report_data, enabledFeatures->shaderInt64, "shaderInt64");
                break;

            case spv::CapabilityTessellationPointSize:
            case spv::CapabilityGeometryPointSize:
                pass &= require_feature(report_data, enabledFeatures->shaderTessellationAndGeometryPointSize,
                                        "shaderTessellationAndGeometryPointSize");
                break;

            case spv::CapabilityImageGatherExtended:
                pass &= require_feature(report_data, enabledFeatures->shaderImageGatherExtended, "shaderImageGatherExtended");
                break;

            case spv::CapabilityStorageImageMultisample:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityUniformBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderUniformBufferArrayDynamicIndexing,
                                        "shaderUniformBufferArrayDynamicIndexing");
                break;

            case spv::CapabilitySampledImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderSampledImageArrayDynamicIndexing,
                                        "shaderSampledImageArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageBufferArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageBufferArrayDynamicIndexing,
                                        "shaderStorageBufferArrayDynamicIndexing");
                break;

            case spv::CapabilityStorageImageArrayDynamicIndexing:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageArrayDynamicIndexing,
                                        "shaderStorageImageArrayDynamicIndexing");
                break;

            case spv::CapabilityClipDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderClipDistance, "shaderClipDistance");
                break;

            case spv::CapabilityCullDistance:
                pass &= require_feature(report_data, enabledFeatures->shaderCullDistance, "shaderCullDistance");
                break;

            case spv::CapabilityImageCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilitySampleRateShading:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilitySparseResidency:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceResidency, "shaderResourceResidency");
                break;

            case spv::CapabilityMinLod:
                pass &= require_feature(report_data, enabledFeatures->shaderResourceMinLod, "shaderResourceMinLod");
                break;

            case spv::CapabilitySampledCubeArray:
                pass &= require_feature(report_data, enabledFeatures->imageCubeArray, "imageCubeArray");
                break;

            case spv::CapabilityImageMSArray:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageMultisample, "shaderStorageImageMultisample");
                break;

            case spv::CapabilityStorageImageExtendedFormats:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageExtendedFormats,
                                        "shaderStorageImageExtendedFormats");
                break;

            case spv::CapabilityInterpolationFunction:
                pass &= require_feature(report_data, enabledFeatures->sampleRateShading, "sampleRateShading");
                break;

            case spv::CapabilityStorageImageReadWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageReadWithoutFormat,
                                        "shaderStorageImageReadWithoutFormat");
                break;

            case spv::CapabilityStorageImageWriteWithoutFormat:
                pass &= require_feature(report_data, enabledFeatures->shaderStorageImageWriteWithoutFormat,
                                        "shaderStorageImageWriteWithoutFormat");
                break;

            case spv::CapabilityMultiViewport:
                pass &= require_feature(report_data, enabledFeatures->multiViewport, "multiViewport");
                break;

            default:
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                            __LINE__, SHADER_CHECKER_BAD_CAPABILITY, "SC",
                            "Shader declares capability %u, not supported in Vulkan.",
                            insn.word(1)))
                    pass = false;
                break;
            }
        }
    }

    return pass;
}
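
// Example: a shader declaring `OpCapability Geometry` passes only if the device was
// created with VkPhysicalDeviceFeatures::geometryShader enabled; capabilities with no
// corresponding feature bit (e.g. CapabilityShader) are accepted unconditionally.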

static uint32_t descriptor_type_to_reqs(shader_module const *module, uint32_t type_id) {
    auto type = module->get_def(type_id);

    while (true) {
        switch (type.opcode()) {
        case spv::OpTypeArray:
        case spv::OpTypeSampledImage:
            type = module->get_def(type.word(2));
            break;
        case spv::OpTypePointer:
            type = module->get_def(type.word(3));
            break;
        case spv::OpTypeImage: {
            auto dim = type.word(3);
            auto arrayed = type.word(5);
            auto msaa = type.word(6);

            switch (dim) {
            case spv::Dim1D:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_1D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_1D;
            case spv::Dim2D:
                return (msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE) |
                       (arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_2D);
            case spv::Dim3D:
                return DESCRIPTOR_REQ_VIEW_TYPE_3D;
            case spv::DimCube:
                return arrayed ? DESCRIPTOR_REQ_VIEW_TYPE_CUBE_ARRAY : DESCRIPTOR_REQ_VIEW_TYPE_CUBE;
            case spv::DimSubpassData:
                return msaa ? DESCRIPTOR_REQ_MULTI_SAMPLE : DESCRIPTOR_REQ_SINGLE_SAMPLE;
            default: // buffer, etc.
                return 0;
            }
        }
        default:
            return 0;
        }
    }
}
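
// Example: an OpTypeImage with Dim2D, arrayed == 1 and msaa == 0 yields
// DESCRIPTOR_REQ_SINGLE_SAMPLE | DESCRIPTOR_REQ_VIEW_TYPE_2D_ARRAY; these bits are
// accumulated into the pipeline's active_slots for later descriptor validation.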

static bool validate_pipeline_shader_stage(debug_report_data *report_data, VkPipelineShaderStageCreateInfo const *pStage,
                                           PIPELINE_STATE *pipeline, shader_module **out_module, spirv_inst_iter *out_entrypoint,
                                           VkPhysicalDeviceFeatures const *enabledFeatures,
                                           std::unordered_map<VkShaderModule, std::unique_ptr<shader_module>> const &shaderModuleMap) {
    bool pass = true;
    auto module_it = shaderModuleMap.find(pStage->module);
    auto module = *out_module = module_it->second.get();

    // Find the entrypoint
    auto entrypoint = *out_entrypoint = find_entrypoint(module, pStage->pName, pStage->stage);
    if (entrypoint == module->end()) {
        if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__, VALIDATION_ERROR_00510,
                    "SC", "No entrypoint found named `%s` for stage %s. %s.", pStage->pName,
                    string_VkShaderStageFlagBits(pStage->stage), validation_error_map[VALIDATION_ERROR_00510])) {
            return false; // no point continuing beyond here, any analysis is just going to be garbage.
        }
    }

    // Validate shader capabilities against enabled device features
    pass &= validate_shader_capabilities(report_data, module, enabledFeatures);

    // Mark accessible ids
    auto accessible_ids = mark_accessible_ids(module, entrypoint);

    // Validate descriptor set layout against what the entrypoint actually uses
    auto descriptor_uses = collect_interface_by_descriptor_slot(report_data, module, accessible_ids);

    auto pipelineLayout = pipeline->pipeline_layout;

    pass &= validate_specialization_offsets(report_data, pStage);
    pass &= validate_push_constant_usage(report_data, &pipelineLayout.push_constant_ranges, module, accessible_ids, pStage->stage);

    // Validate descriptor use
    for (auto use : descriptor_uses) {
        // While validating shaders capture which slots are used by the pipeline
        auto &reqs = pipeline->active_slots[use.first.first][use.first.second];
        reqs = descriptor_req(reqs | descriptor_type_to_reqs(module, use.second.type_id));

        // Verify given pipelineLayout has requested setLayout with requested binding
        const auto &binding = get_descriptor_binding(&pipelineLayout, use.first);
        unsigned required_descriptor_count;

        if (!binding) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                        __LINE__, SHADER_CHECKER_MISSING_DESCRIPTOR, "SC",
                        "Shader uses descriptor slot %u.%u (used as type `%s`) but not declared in pipeline layout",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str())) {
                pass = false;
            }
        } else if (~binding->stageFlags & pStage->stage) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        0, __LINE__, SHADER_CHECKER_DESCRIPTOR_NOT_ACCESSIBLE_FROM_STAGE, "SC",
                        "Shader uses descriptor slot %u.%u (used "
                        "as type `%s`) but descriptor not "
                        "accessible from stage %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkShaderStageFlagBits(pStage->stage))) {
                pass = false;
            }
        } else if (!descriptor_type_match(module, use.second.type_id, binding->descriptorType,
                                          required_descriptor_count)) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC", "Type mismatch on descriptor slot "
                        "%u.%u (used as type `%s`) but "
                        "descriptor of type %s",
                        use.first.first, use.first.second, describe_type(module, use.second.type_id).c_str(),
                        string_VkDescriptorType(binding->descriptorType))) {
                pass = false;
            }
        } else if (binding->descriptorCount < required_descriptor_count) {
            if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        SHADER_CHECKER_DESCRIPTOR_TYPE_MISMATCH, "SC",
                        "Shader expects at least %u descriptors for binding %u.%u (used as type `%s`) but only %u provided",
                        required_descriptor_count, use.first.first, use.first.second,
                        describe_type(module, use.second.type_id).c_str(), binding->descriptorCount)) {
                pass = false;
            }
        }
    }

    // Validate use of input attachments against subpass structure
    if (pStage->stage == VK_SHADER_STAGE_FRAGMENT_BIT) {
        auto input_attachment_uses = collect_interface_by_input_attachment_index(report_data, module, accessible_ids);

        auto rpci = pipeline->render_pass_ci.ptr();
        auto subpass = pipeline->graphicsPipelineCI.subpass;

        for (auto use : input_attachment_uses) {
            auto input_attachments = rpci->pSubpasses[subpass].pInputAttachments;
            auto index = (input_attachments && use.first < rpci->pSubpasses[subpass].inputAttachmentCount) ?
                    input_attachments[use.first].attachment : VK_ATTACHMENT_UNUSED;

            if (index == VK_ATTACHMENT_UNUSED) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_MISSING_INPUT_ATTACHMENT, "SC",
                            "Shader consumes input attachment index %d but not provided in subpass",
                            use.first)) {
                    pass = false;
                }
            } else if (get_format_type(rpci->pAttachments[index].format) !=
                       get_fundamental_type(module, use.second.type_id)) {
                if (log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                            SHADER_CHECKER_INPUT_ATTACHMENT_TYPE_MISMATCH, "SC",
                            "Subpass input attachment %u format of %s does not match type used in shader `%s`",
                            use.first, string_VkFormat(rpci->pAttachments[index].format),
                            describe_type(module, use.second.type_id).c_str())) {
                    pass = false;
                }
            }
        }
    }

    return pass;
}

// Validate the shaders used by the given pipeline and store the active_slots
// that are actually used by the pipeline into pPipeline->active_slots
static bool validate_and_capture_pipeline_shader_state(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                                       VkPhysicalDeviceFeatures const *enabledFeatures,
                                                       std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->graphicsPipelineCI.ptr();
    int vertex_stage = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int fragment_stage = get_shader_stage_id(VK_SHADER_STAGE_FRAGMENT_BIT);

    shader_module *shaders[5];
    memset(shaders, 0, sizeof(shaders));
    spirv_inst_iter entrypoints[5];
    memset(entrypoints, 0, sizeof(entrypoints));
    VkPipelineVertexInputStateCreateInfo const *vi = 0;
    bool pass = true;

    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
        auto pStage = &pCreateInfo->pStages[i];
        auto stage_id = get_shader_stage_id(pStage->stage);
        pass &= validate_pipeline_shader_stage(report_data, pStage, pPipeline,
                                               &shaders[stage_id], &entrypoints[stage_id],
                                               enabledFeatures, shaderModuleMap);
    }

    // If the shader stages are no good individually, cross-stage validation is pointless.
    if (!pass)
        return false;

    vi = pCreateInfo->pVertexInputState;

    if (vi) {
        pass &= validate_vi_consistency(report_data, vi);
    }

    if (shaders[vertex_stage]) {
        pass &= validate_vi_against_vs_inputs(report_data, vi, shaders[vertex_stage], entrypoints[vertex_stage]);
    }

    int producer = get_shader_stage_id(VK_SHADER_STAGE_VERTEX_BIT);
    int consumer = get_shader_stage_id(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);

    while (!shaders[producer] && producer != fragment_stage) {
        producer++;
        consumer++;
    }

    for (; producer != fragment_stage && consumer <= fragment_stage; consumer++) {
        assert(shaders[producer]);
        if (shaders[consumer]) {
            pass &= validate_interface_between_stages(report_data,
                                                      shaders[producer], entrypoints[producer], &shader_stage_attribs[producer],
                                                      shaders[consumer], entrypoints[consumer], &shader_stage_attribs[consumer]);

            producer = consumer;
        }
    }

    if (shaders[fragment_stage]) {
        pass &= validate_fs_outputs_against_render_pass(report_data, shaders[fragment_stage], entrypoints[fragment_stage],
                                                        pPipeline->render_pass_ci.ptr(), pCreateInfo->subpass);
    }

    return pass;
}
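
// Example of the producer/consumer walk above: for a pipeline with vertex, geometry and
// fragment shaders, the interfaces checked are VS->GS and then GS->FS; stages that are
// not present are simply skipped over.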

static bool validate_compute_pipeline(debug_report_data *report_data, PIPELINE_STATE *pPipeline,
                                      VkPhysicalDeviceFeatures const *enabledFeatures,
                                      std::unordered_map<VkShaderModule, unique_ptr<shader_module>> const &shaderModuleMap) {
    auto pCreateInfo = pPipeline->computePipelineCI.ptr();

    shader_module *module;
    spirv_inst_iter entrypoint;

    return validate_pipeline_shader_stage(report_data, &pCreateInfo->stage, pPipeline,
                                          &module, &entrypoint, enabledFeatures, shaderModuleMap);
}

// Return Set node ptr for specified set or else NULL
cvdescriptorset::DescriptorSet *getSetNode(const layer_data *my_data, VkDescriptorSet set) {
    auto set_it = my_data->setMap.find(set);
    if (set_it == my_data->setMap.end()) {
        return NULL;
    }
    return set_it->second;
}

// For given pipeline, return number of MSAA samples, or one if MSAA disabled
static VkSampleCountFlagBits getNumSamples(PIPELINE_STATE const *pipe) {
    if (pipe->graphicsPipelineCI.pMultisampleState != NULL &&
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO == pipe->graphicsPipelineCI.pMultisampleState->sType) {
        return pipe->graphicsPipelineCI.pMultisampleState->rasterizationSamples;
    }
    return VK_SAMPLE_COUNT_1_BIT;
}

static void list_bits(std::ostream &s, uint32_t bits) {
    for (int i = 0; i < 32 && bits; i++) {
        if (bits & (1 << i)) {
            s << i;
            bits &= ~(1 << i);
            if (bits) {
                s << ",";
            }
        }
    }
}
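
// Example: list_bits(ss, 0x13) appends "0,1,4" to the stream (set bits listed LSB first).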
2857
Mark Young29927482016-05-04 14:38:51 -06002858// Validate draw-time state related to the PSO
Tobin Ehlis288cb7e2016-12-21 08:30:22 -07002859static bool ValidatePipelineDrawtimeState(layer_data const *my_data, LAST_BOUND_STATE const &state, const GLOBAL_CB_NODE *pCB,
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002860 PIPELINE_STATE const *pPipeline) {
Mark Young29927482016-05-04 14:38:51 -06002861 bool skip_call = false;
Mark Young29927482016-05-04 14:38:51 -06002862
Mike Weiblencce7ec72016-10-17 19:33:05 -06002863 // Verify vertex binding
Chris Forbesdbc66322016-05-31 16:33:48 +12002864 if (pPipeline->vertexBindingDescriptions.size() > 0) {
2865 for (size_t i = 0; i < pPipeline->vertexBindingDescriptions.size(); i++) {
Tobin Ehlis9b9fdd32016-08-03 09:59:17 -06002866 auto vertex_binding = pPipeline->vertexBindingDescriptions[i].binding;
2867 if ((pCB->currentDrawData.buffers.size() < (vertex_binding + 1)) ||
2868 (pCB->currentDrawData.buffers[vertex_binding] == VK_NULL_HANDLE)) {
2869 skip_call |= log_msg(
2870 my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
2871 DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
2872 "The Pipeline State Object (0x%" PRIxLEAST64 ") expects that this Command Buffer's vertex binding Index %u "
2873 "should be set via vkCmdBindVertexBuffers. This is because VkVertexInputBindingDescription struct "
2874 "at index " PRINTF_SIZE_T_SPECIFIER " of pVertexBindingDescriptions has a binding value of %u.",
Tobin Ehlis52c76a32016-10-12 09:05:51 -06002875 (uint64_t)state.pipeline_state->pipeline, vertex_binding, i, vertex_binding);
Chris Forbesdbc66322016-05-31 16:33:48 +12002876 }
2877 }
2878 } else {
Tobin Ehlis232017e2016-12-21 10:28:54 -07002879 if (!pCB->currentDrawData.buffers.empty() && !pCB->vertex_buffer_used) {
Chris Forbesdbc66322016-05-31 16:33:48 +12002880 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
Tobin Ehliseb00b0d2016-08-17 07:55:55 -06002881 0, __LINE__, DRAWSTATE_VTX_INDEX_OUT_OF_BOUNDS, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07002882 "Vertex buffers are bound to command buffer (0x%p"
Tobin Ehliseb00b0d2016-08-17 07:55:55 -06002883 ") but no vertex buffers are attached to this Pipeline State Object (0x%" PRIxLEAST64 ").",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07002884 pCB->commandBuffer, (uint64_t)state.pipeline_state->pipeline);
Chris Forbesdbc66322016-05-31 16:33:48 +12002885 }
2886 }
2887 // If Viewport or scissors are dynamic, verify that dynamic count matches PSO count.
2888 // Skip check if rasterization is disabled or there is no viewport.
2889 if ((!pPipeline->graphicsPipelineCI.pRasterizationState ||
2890 (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) &&
2891 pPipeline->graphicsPipelineCI.pViewportState) {
2892 bool dynViewport = isDynamic(pPipeline, VK_DYNAMIC_STATE_VIEWPORT);
2893 bool dynScissor = isDynamic(pPipeline, VK_DYNAMIC_STATE_SCISSOR);
Chris Forbes5fc77832016-07-28 14:15:38 +12002894
Chris Forbesdbc66322016-05-31 16:33:48 +12002895 if (dynViewport) {
Chris Forbes5fc77832016-07-28 14:15:38 +12002896 auto requiredViewportsMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->viewportCount) - 1;
2897 auto missingViewportMask = ~pCB->viewportMask & requiredViewportsMask;
2898 if (missingViewportMask) {
2899 std::stringstream ss;
2900 ss << "Dynamic viewport(s) ";
2901 list_bits(ss, missingViewportMask);
Mike Weiblencce7ec72016-10-17 19:33:05 -06002902 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetViewport().";
Chris Forbes5fc77832016-07-28 14:15:38 +12002903 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2904 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2905 "%s", ss.str().c_str());
Chris Forbesdbc66322016-05-31 16:33:48 +12002906 }
2907 }
Chris Forbes5fc77832016-07-28 14:15:38 +12002908
Chris Forbesdbc66322016-05-31 16:33:48 +12002909 if (dynScissor) {
Chris Forbes5fc77832016-07-28 14:15:38 +12002910 auto requiredScissorMask = (1 << pPipeline->graphicsPipelineCI.pViewportState->scissorCount) - 1;
2911 auto missingScissorMask = ~pCB->scissorMask & requiredScissorMask;
2912 if (missingScissorMask) {
2913 std::stringstream ss;
2914 ss << "Dynamic scissor(s) ";
2915 list_bits(ss, missingScissorMask);
Mike Weiblencce7ec72016-10-17 19:33:05 -06002916 ss << " are used by pipeline state object, but were not provided via calls to vkCmdSetScissor().";
Chris Forbes5fc77832016-07-28 14:15:38 +12002917 skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
2918 __LINE__, DRAWSTATE_VIEWPORT_SCISSOR_MISMATCH, "DS",
2919 "%s", ss.str().c_str());
Chris Forbesdbc66322016-05-31 16:33:48 +12002920 }
2921 }
2922 }

    // Verify that any MSAA request in PSO matches sample# in bound FB
    // Skip the check if rasterization is disabled.
    if (!pPipeline->graphicsPipelineCI.pRasterizationState ||
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        VkSampleCountFlagBits pso_num_samples = getNumSamples(pPipeline);
        if (pCB->activeRenderPass) {
            auto const render_pass_info = pCB->activeRenderPass->createInfo.ptr();
            const VkSubpassDescription *subpass_desc = &render_pass_info->pSubpasses[pCB->activeSubpass];
            uint32_t i;

            const safe_VkPipelineColorBlendStateCreateInfo *color_blend_state = pPipeline->graphicsPipelineCI.pColorBlendState;
            if ((color_blend_state != NULL) && (pCB->activeSubpass == pPipeline->graphicsPipelineCI.subpass) &&
                (color_blend_state->attachmentCount != subpass_desc->colorAttachmentCount)) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Render pass subpass %u mismatch: the pipeline's blend state defines %u attachments "
                            "but the subpass has %u color attachments in Pipeline (0x%" PRIxLEAST64 ")! These "
                            "counts must match at draw-time.",
                            pCB->activeSubpass, color_blend_state->attachmentCount, subpass_desc->colorAttachmentCount,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
            }

            unsigned subpass_num_samples = 0;

            for (i = 0; i < subpass_desc->colorAttachmentCount; i++) {
                auto attachment = subpass_desc->pColorAttachments[i].attachment;
                if (attachment != VK_ATTACHMENT_UNUSED)
                    subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }

            if (subpass_desc->pDepthStencilAttachment &&
                subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                auto attachment = subpass_desc->pDepthStencilAttachment->attachment;
                subpass_num_samples |= (unsigned)render_pass_info->pAttachments[attachment].samples;
            }
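            // Note: VkSampleCountFlagBits values are single bits, so the ORs above accumulate a mask of
            // every sample count used by the subpass. When all used attachments share one sample count,
            // the mask has exactly one bit set and must equal the PSO's sample count; mixed counts can
            // never compare equal and are also flagged below.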

            if (subpass_num_samples && static_cast<unsigned>(pso_num_samples) != subpass_num_samples) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                            "Num samples mismatch! At draw-time, Pipeline (0x%" PRIxLEAST64
                            ") uses %u samples while the current RenderPass (0x%" PRIxLEAST64 ") uses %u samples!",
                            reinterpret_cast<const uint64_t &>(pPipeline->pipeline), pso_num_samples,
                            reinterpret_cast<const uint64_t &>(pCB->activeRenderPass->renderPass), subpass_num_samples);
            }
        } else {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__,
                                 DRAWSTATE_NUM_SAMPLES_MISMATCH, "DS",
                                 "No active render pass found at draw-time in Pipeline (0x%" PRIxLEAST64 ")!",
                                 reinterpret_cast<const uint64_t &>(pPipeline->pipeline));
        }
    }
    // Verify that PSO creation renderPass is compatible with active renderPass
    if (pCB->activeRenderPass) {
        std::string err_string;
        if ((pCB->activeRenderPass->renderPass != pPipeline->graphicsPipelineCI.renderPass) &&
            !verify_renderpass_compatibility(my_data, pCB->activeRenderPass->createInfo.ptr(), pPipeline->render_pass_ci.ptr(),
                                             err_string)) {
            // renderPass that PSO was created with must be compatible with active renderPass that PSO is being used with
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<const uint64_t &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "At Draw time the active render pass (0x%" PRIxLEAST64 ") is incompatible w/ gfx pipeline "
                        "(0x%" PRIxLEAST64 ") that was created w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
                        reinterpret_cast<uint64_t &>(pCB->activeRenderPass->renderPass),
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                        reinterpret_cast<const uint64_t &>(pPipeline->graphicsPipelineCI.renderPass), err_string.c_str());
        }

        if (pPipeline->graphicsPipelineCI.subpass != pCB->activeSubpass) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        reinterpret_cast<uint64_t const &>(pPipeline->pipeline), __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
                        "Pipeline was built for subpass %u but used in subpass %u", pPipeline->graphicsPipelineCI.subpass,
                        pCB->activeSubpass);
        }
    }
    // TODO : Add more checks here

    return skip_call;
}

// Validate overall state at the time of a draw call
static bool ValidateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_node, const bool indexed,
                              const VkPipelineBindPoint bind_point, const char *function,
                              UNIQUE_VALIDATION_ERROR_CODE const msg_code) {
    bool result = false;
    auto const &state = cb_node->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (nullptr == pPipe) {
        result |= log_msg(
            my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
            DRAWSTATE_INVALID_PIPELINE, "DS",
            "At Draw/Dispatch time no valid VkPipeline is bound! This is illegal. Please bind one with vkCmdBindPipeline().");
        // Early return as any further checks below will be busted w/o a pipeline
        if (result)
            return true;
    }
    // First check flag states
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result = validate_draw_state_flags(my_data, cb_node, pPipe, indexed, msg_code);

    // Now complete other state checks
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        string errorString;
        auto pipeline_layout = pPipe->pipeline_layout;

        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // If valid set is not bound throw an error
            if ((state.boundDescriptorSets.size() <= setIndex) || (!state.boundDescriptorSets[setIndex])) {
                result |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                  DRAWSTATE_DESCRIPTOR_SET_NOT_BOUND, "DS",
                                  "VkPipeline 0x%" PRIxLEAST64 " uses set #%u but that set is not bound.",
                                  (uint64_t)pPipe->pipeline, setIndex);
            } else if (!verify_set_layout_compatibility(my_data, state.boundDescriptorSets[setIndex], &pipeline_layout, setIndex,
                                                        errorString)) {
                // Set is bound but not compatible w/ overlapping pipeline_layout from PSO
                VkDescriptorSet setHandle = state.boundDescriptorSets[setIndex]->GetSet();
                result |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            (uint64_t)setHandle, __LINE__, DRAWSTATE_PIPELINE_LAYOUTS_INCOMPATIBLE, "DS",
                            "VkDescriptorSet (0x%" PRIxLEAST64
                            ") bound as set #%u is not compatible with overlapping VkPipelineLayout 0x%" PRIxLEAST64 " due to: %s",
                            reinterpret_cast<uint64_t &>(setHandle), setIndex,
                            reinterpret_cast<uint64_t &>(pipeline_layout.layout), errorString.c_str());
            } else { // Valid set is bound and layout compatible, validate that it's updated
                // Pull the set node
                cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
                // Gather active bindings
                std::unordered_set<uint32_t> active_bindings;
                for (auto binding : set_binding_pair.second) {
                    active_bindings.insert(binding.first);
                }
                // Make sure set has been updated if it has no immutable samplers
                // If it has immutable samplers, we'll flag error later as needed depending on binding
                if (!descriptor_set->IsUpdated()) {
                    for (auto binding : active_bindings) {
                        if (!descriptor_set->GetImmutableSamplerPtrFromBinding(binding)) {
                            result |= log_msg(
                                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)descriptor_set->GetSet(), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor Set 0x%" PRIxLEAST64 " bound but was never updated. It is now being used to draw so "
                                "this will result in undefined behavior.",
                                (uint64_t)descriptor_set->GetSet());
                        }
                    }
                }
                // Validate the draw-time state for this descriptor set
                std::string err_str;
                if (!descriptor_set->ValidateDrawState(set_binding_pair.second, state.dynamicOffsets[setIndex], &err_str)) {
                    auto set = descriptor_set->GetSet();
                    result |=
                        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                reinterpret_cast<const uint64_t &>(set), __LINE__, DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                "Descriptor set 0x%" PRIxLEAST64 " encountered the following validation error at %s() time: %s",
                                reinterpret_cast<const uint64_t &>(set), function, err_str.c_str());
                }
            }
        }
    }

    // Check general pipeline state that needs to be validated at drawtime
    if (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point)
        result |= ValidatePipelineDrawtimeState(my_data, state, cb_node, pPipe);

    return result;
}

static void UpdateDrawState(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const VkPipelineBindPoint bind_point) {
    auto const &state = cb_state->lastBound[bind_point];
    PIPELINE_STATE *pPipe = state.pipeline_state;
    if (VK_NULL_HANDLE != state.pipeline_layout.layout) {
        for (const auto &set_binding_pair : pPipe->active_slots) {
            uint32_t setIndex = set_binding_pair.first;
            // Pull the set node
            cvdescriptorset::DescriptorSet *descriptor_set = state.boundDescriptorSets[setIndex];
            // Bind this set and its active descriptor resources to the command buffer
            descriptor_set->BindCommandBuffer(cb_state, set_binding_pair.second);
            // For given active slots record updated images & buffers
            descriptor_set->GetStorageUpdates(set_binding_pair.second, &cb_state->updateBuffers, &cb_state->updateImages);
        }
    }
    if (pPipe->vertexBindingDescriptions.size() > 0) {
        cb_state->vertex_buffer_used = true;
    }
}
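
// Sketch of a typical call site (hypothetical; the actual vkCmdDraw* intercepts appear later in
// this file). A draw entry point would validate first and only update tracking state if recording
// proceeds:
//     skip |= ValidateDrawState(dev_data, cb_node, true /* indexed */, VK_PIPELINE_BIND_POINT_GRAPHICS,
//                               "vkCmdDrawIndexed()", msg_code /* placeholder error code */);
//     if (!skip) UpdateDrawState(dev_data, cb_node, VK_PIPELINE_BIND_POINT_GRAPHICS);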

// Validate HW line width capabilities prior to setting requested line width.
static bool verifyLineWidth(layer_data *my_data, DRAW_STATE_ERROR dsError, const uint64_t &target, float lineWidth) {
    bool skip_call = false;

    // First check to see if the physical device supports wide lines.
    if ((VK_FALSE == my_data->enabled_features.wideLines) && (1.0f != lineWidth)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target, __LINE__,
                             dsError, "DS", "Attempt to set lineWidth to %f but physical device wideLines feature "
                             "not supported/enabled so lineWidth must be 1.0f!",
                             lineWidth);
    } else {
        // Otherwise, make sure the width falls in the valid range.
        if ((my_data->phys_dev_properties.properties.limits.lineWidthRange[0] > lineWidth) ||
            (my_data->phys_dev_properties.properties.limits.lineWidthRange[1] < lineWidth)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, target,
                                 __LINE__, dsError, "DS", "Attempt to set lineWidth to %f but physical device limits line width "
                                 "to between [%f, %f]!",
                                 lineWidth, my_data->phys_dev_properties.properties.limits.lineWidthRange[0],
                                 my_data->phys_dev_properties.properties.limits.lineWidthRange[1]);
        }
    }

    return skip_call;
}
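
// Illustrative outcomes (hypothetical device limits): with wideLines disabled, any width other than
// 1.0f (e.g. 2.0f) is flagged; with wideLines enabled and lineWidthRange == [0.5, 8.0], widths of
// 0.25f or 10.0f are flagged while 4.0f passes.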

// Verify that create state for a pipeline is valid
static bool verifyPipelineCreateState(layer_data *my_data, std::vector<PIPELINE_STATE *> pPipelines, int pipelineIndex) {
    bool skip_call = false;

    PIPELINE_STATE *pPipeline = pPipelines[pipelineIndex];

    // If create derivative bit is set, check that we've specified a base
    // pipeline correctly, and that the base pipeline was created to allow
    // derivatives.
    if (pPipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_DERIVATIVE_BIT) {
        PIPELINE_STATE *pBasePipeline = nullptr;
        if (!((pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) ^
              (pPipeline->graphicsPipelineCI.basePipelineIndex != -1))) {
            // This check is a superset of VALIDATION_ERROR_00526 and VALIDATION_ERROR_00528
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: exactly one of base pipeline index and handle must be specified");
        } else if (pPipeline->graphicsPipelineCI.basePipelineIndex != -1) {
            if (pPipeline->graphicsPipelineCI.basePipelineIndex >= pipelineIndex) {
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            VALIDATION_ERROR_00518, "DS",
                            "Invalid Pipeline CreateInfo: base pipeline must occur earlier in array than derivative pipeline. %s",
                            validation_error_map[VALIDATION_ERROR_00518]);
            } else {
                pBasePipeline = pPipelines[pPipeline->graphicsPipelineCI.basePipelineIndex];
            }
        } else if (pPipeline->graphicsPipelineCI.basePipelineHandle != VK_NULL_HANDLE) {
            pBasePipeline = getPipelineState(my_data, pPipeline->graphicsPipelineCI.basePipelineHandle);
        }

        if (pBasePipeline && !(pBasePipeline->graphicsPipelineCI.flags & VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT)) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                 "Invalid Pipeline CreateInfo: base pipeline does not allow derivatives.");
        }
    }

    if (pPipeline->graphicsPipelineCI.pColorBlendState != NULL) {
        if (!my_data->enabled_features.independentBlend) {
            if (pPipeline->attachments.size() > 1) {
                VkPipelineColorBlendAttachmentState *pAttachments = &pPipeline->attachments[0];
                for (size_t i = 1; i < pPipeline->attachments.size(); i++) {
                    // Quoting the spec: "If [the independent blend] feature is not enabled, the VkPipelineColorBlendAttachmentState
                    // settings for all color attachments must be identical." VkPipelineColorBlendAttachmentState contains
                    // only attachment state, so memcmp is best suited for the comparison
                    if (memcmp(static_cast<const void *>(pAttachments), static_cast<const void *>(&pAttachments[i]),
                               sizeof(pAttachments[0]))) {
                        skip_call |=
                            log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                    VALIDATION_ERROR_01532, "DS", "Invalid Pipeline CreateInfo: If independent blend feature not "
                                    "enabled, all elements of pAttachments must be identical. %s",
                                    validation_error_map[VALIDATION_ERROR_01532]);
                        break;
                    }
                }
            }
        }
        if (!my_data->enabled_features.logicOp &&
            (pPipeline->graphicsPipelineCI.pColorBlendState->logicOpEnable != VK_FALSE)) {
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_01533, "DS",
                        "Invalid Pipeline CreateInfo: If logic operations feature not enabled, logicOpEnable must be VK_FALSE. %s",
                        validation_error_map[VALIDATION_ERROR_01533]);
        }
    }

    // Ensure the subpass index is valid. If not, then validate_and_capture_pipeline_shader_state
    // produces nonsense errors that confuse users. Other layers should already
    // emit errors for renderpass being invalid.
    auto renderPass = getRenderPassState(my_data, pPipeline->graphicsPipelineCI.renderPass);
    if (renderPass && pPipeline->graphicsPipelineCI.subpass >= renderPass->createInfo.subpassCount) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02122, "DS", "Invalid Pipeline CreateInfo State: Subpass index %u "
                             "is out of range for this renderpass (0..%u). %s",
                             pPipeline->graphicsPipelineCI.subpass, renderPass->createInfo.subpassCount - 1,
                             validation_error_map[VALIDATION_ERROR_02122]);
    }

    if (!validate_and_capture_pipeline_shader_state(my_data->report_data, pPipeline, &my_data->enabled_features,
                                                    my_data->shaderModuleMap)) {
        skip_call = true;
    }
    // Each shader's stage must be unique
    if (pPipeline->duplicate_shaders) {
        for (uint32_t stage = VK_SHADER_STAGE_VERTEX_BIT; stage & VK_SHADER_STAGE_ALL_GRAPHICS; stage <<= 1) {
            if (pPipeline->duplicate_shaders & stage) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                     __LINE__, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE, "DS",
                                     "Invalid Pipeline CreateInfo State: Multiple shaders provided for stage %s",
                                     string_VkShaderStageFlagBits(VkShaderStageFlagBits(stage)));
            }
        }
    }
    // VS is required
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_VERTEX_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00532, "DS", "Invalid Pipeline CreateInfo State: Vertex Shader required. %s",
                             validation_error_map[VALIDATION_ERROR_00532]);
    }
    // Either both or neither TC/TE shaders should be defined
    if ((pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        !(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00534, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00534]);
    }
    if (!(pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) &&
        (pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00535, "DS",
                             "Invalid Pipeline CreateInfo State: TE and TC shaders must be included or excluded as a pair. %s",
                             validation_error_map[VALIDATION_ERROR_00535]);
    }
    // Compute shaders should be specified independent of Gfx shaders
    if (pPipeline->active_shaders & VK_SHADER_STAGE_COMPUTE_BIT) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00533, "DS",
                             "Invalid Pipeline CreateInfo State: Do not specify Compute Shader for Gfx Pipeline. %s",
                             validation_error_map[VALIDATION_ERROR_00533]);
    }
    // VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive topology is only valid for tessellation pipelines.
    // Mismatching primitive topology and tessellation fails graphics pipeline creation.
    if (pPipeline->active_shaders & (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) &&
        (!pPipeline->graphicsPipelineCI.pInputAssemblyState ||
         pPipeline->graphicsPipelineCI.pInputAssemblyState->topology != VK_PRIMITIVE_TOPOLOGY_PATCH_LIST)) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_02099, "DS", "Invalid Pipeline CreateInfo State: "
                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST must be set as IA "
                             "topology for tessellation pipelines. %s",
                             validation_error_map[VALIDATION_ERROR_02099]);
    }
    if (pPipeline->graphicsPipelineCI.pInputAssemblyState &&
        pPipeline->graphicsPipelineCI.pInputAssemblyState->topology == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST) {
        if (~pPipeline->active_shaders & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_02100, "DS", "Invalid Pipeline CreateInfo State: "
                                 "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                                 "topology is only valid for tessellation pipelines. %s",
                                 validation_error_map[VALIDATION_ERROR_02100]);
        }
    }

    if (pPipeline->graphicsPipelineCI.pTessellationState &&
        ((pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints == 0) ||
         (pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints >
          my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize))) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_01426, "DS", "Invalid Pipeline CreateInfo State: "
                             "VK_PRIMITIVE_TOPOLOGY_PATCH_LIST primitive "
                             "topology used with patchControlPoints value %u."
                             " patchControlPoints should be >0 and <=%u. %s",
                             pPipeline->graphicsPipelineCI.pTessellationState->patchControlPoints,
                             my_data->phys_dev_properties.properties.limits.maxTessellationPatchSize,
                             validation_error_map[VALIDATION_ERROR_01426]);
    }

    // If a rasterization state is provided, make sure that the line width conforms to the HW.
    if (pPipeline->graphicsPipelineCI.pRasterizationState) {
        if (!isDynamic(pPipeline, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= verifyLineWidth(my_data, DRAWSTATE_INVALID_PIPELINE_CREATE_STATE,
                                         reinterpret_cast<uint64_t const &>(pPipeline->pipeline),
                                         pPipeline->graphicsPipelineCI.pRasterizationState->lineWidth);
        }
    }

    // If rasterization is not disabled and subpass uses a depth/stencil attachment, pDepthStencilState must be a pointer to a
    // valid structure
    if (pPipeline->graphicsPipelineCI.pRasterizationState &&
        (pPipeline->graphicsPipelineCI.pRasterizationState->rasterizerDiscardEnable == VK_FALSE)) {
        auto subpass_desc = renderPass ? &renderPass->createInfo.pSubpasses[pPipeline->graphicsPipelineCI.subpass] : nullptr;
        if (subpass_desc && subpass_desc->pDepthStencilAttachment &&
            subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            if (!pPipeline->graphicsPipelineCI.pDepthStencilState) {
                skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT,
                                     0, __LINE__, VALIDATION_ERROR_02115, "DS",
                                     "Invalid Pipeline CreateInfo State: "
                                     "pDepthStencilState is NULL when rasterization is enabled and subpass uses a "
                                     "depth/stencil attachment. %s",
                                     validation_error_map[VALIDATION_ERROR_02115]);
            }
        }
    }
    return skip_call;
}
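
// Illustrative use of the derivative-pipeline rules enforced above (hypothetical values): in a
// single vkCreateGraphicsPipelines() call,
//     ci.flags              = VK_PIPELINE_CREATE_DERIVATIVE_BIT;
//     ci.basePipelineHandle = VK_NULL_HANDLE;
//     ci.basePipelineIndex  = 2;
// is valid only if element 2 precedes this pipeline in the pCreateInfos array and was itself
// created with VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT.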

// Free the Pipeline nodes
static void deletePipelines(layer_data *my_data) {
    if (my_data->pipelineMap.empty())
        return;
    for (auto &pipe_map_pair : my_data->pipelineMap) {
        delete pipe_map_pair.second;
    }
    my_data->pipelineMap.clear();
}

// Block of code at start here specifically for managing/tracking DSs

// Return Pool node ptr for specified pool or else NULL
DESCRIPTOR_POOL_STATE *getDescriptorPoolState(const layer_data *dev_data, const VkDescriptorPool pool) {
    auto pool_it = dev_data->descriptorPoolMap.find(pool);
    if (pool_it == dev_data->descriptorPoolMap.end()) {
        return NULL;
    }
    return pool_it->second;
}
3346
Dustin Graves8f1eab92016-04-05 09:41:17 -06003347// Return false if update struct is of valid type, otherwise flag error and return code from callback
3348static bool validUpdateStruct(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07003349 switch (pUpdateStruct->sType) {
3350 case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
3351 case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
Dustin Graves8f1eab92016-04-05 09:41:17 -06003352 return false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07003353 default:
3354 return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
3355 DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
3356 "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
3357 string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
3358 }
3359}

// Return the descriptor count for the given update struct
static uint32_t getUpdateCount(layer_data *my_data, const VkDevice device, const GENERIC_HEADER *pUpdateStruct) {
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        return ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorCount;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // TODO : Need to understand this case better and make sure code is correct
        return ((VkCopyDescriptorSet *)pUpdateStruct)->descriptorCount;
    default:
        return 0;
    }
}

// For given layout and update, return the first overall index of the layout that is updated
static uint32_t getUpdateStartIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                    const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    return binding_start_index + arrayIndex;
}
// For given layout and update, return the last overall index of the layout that is updated
static uint32_t getUpdateEndIndex(layer_data *my_data, const VkDevice device, const uint32_t binding_start_index,
                                  const uint32_t arrayIndex, const GENERIC_HEADER *pUpdateStruct) {
    uint32_t count = getUpdateCount(my_data, device, pUpdateStruct);
    return binding_start_index + arrayIndex + count - 1;
}
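// Worked example (illustrative): for a binding whose descriptors begin at overall layout index 4
// (binding_start_index == 4), an update with arrayIndex == 1 and descriptorCount == 3 covers
// getUpdateStartIndex() == 5 through getUpdateEndIndex() == 7.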
// Verify that the descriptor type in the update struct matches what's expected by the layout
static bool validateUpdateConsistency(layer_data *my_data, const VkDevice device, const VkDescriptorType layout_type,
                                      const GENERIC_HEADER *pUpdateStruct, uint32_t startIndex, uint32_t endIndex) {
    // First get actual type of update
    bool skip_call = false;
    VkDescriptorType actualType = VK_DESCRIPTOR_TYPE_MAX_ENUM;
    switch (pUpdateStruct->sType) {
    case VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET:
        actualType = ((VkWriteDescriptorSet *)pUpdateStruct)->descriptorType;
        break;
    case VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET:
        // No need to validate
        return false;
    default:
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_UPDATE_STRUCT, "DS",
                             "Unexpected UPDATE struct of type %s (value %u) in vkUpdateDescriptors() struct tree",
                             string_VkStructureType(pUpdateStruct->sType), pUpdateStruct->sType);
    }
    if (!skip_call) {
        if (layout_type != actualType) {
            skip_call |= log_msg(
                my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                DRAWSTATE_DESCRIPTOR_TYPE_MISMATCH, "DS",
                "Write descriptor update has descriptor type %s that does not match overlapping binding descriptor type of %s!",
                string_VkDescriptorType(actualType), string_VkDescriptorType(layout_type));
        }
    }
    return skip_call;
}
// TODO: Consolidate functions
bool FindLayout(const GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, IMAGE_CMD_BUF_LAYOUT_NODE &node,
                const VkImageAspectFlags aspectMask) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
    if (imgsubIt == pCB->imageLayoutMap.end()) {
        return false;
    }
    if (node.layout != VK_IMAGE_LAYOUT_MAX_ENUM && node.layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    if (node.initialLayout != VK_IMAGE_LAYOUT_MAX_ENUM && node.initialLayout != imgsubIt->second.initialLayout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64
                " layout when combined aspect mask %d has multiple initial layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(node.initialLayout),
                string_VkImageLayout(imgsubIt->second.initialLayout));
    }
    node = imgsubIt->second;
    return true;
}

bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout,
                const VkImageAspectFlags aspectMask) {
    if (!(imgpair.subresource.aspectMask & aspectMask)) {
        return false;
    }
    VkImageAspectFlags oldAspectMask = imgpair.subresource.aspectMask;
    imgpair.subresource.aspectMask = aspectMask;
    auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
    if (imgsubIt == my_data->imageLayoutMap.end()) {
        return false;
    }
    if (layout != VK_IMAGE_LAYOUT_MAX_ENUM && layout != imgsubIt->second.layout) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                reinterpret_cast<uint64_t &>(imgpair.image), __LINE__, DRAWSTATE_INVALID_LAYOUT, "DS",
                "Cannot query for VkImage 0x%" PRIx64 " layout when combined aspect mask %d has multiple layout types: %s and %s",
                reinterpret_cast<uint64_t &>(imgpair.image), oldAspectMask, string_VkImageLayout(layout),
                string_VkImageLayout(imgsubIt->second.layout));
    }
    layout = imgsubIt->second.layout;
    return true;
}

// Find layout(s) on the cmd buf level
bool FindLayout(const GLOBAL_CB_NODE *pCB, VkImage image, VkImageSubresource range, IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    ImageSubresourcePair imgpair = {image, true, range};
    node = IMAGE_CMD_BUF_LAYOUT_NODE(VK_IMAGE_LAYOUT_MAX_ENUM, VK_IMAGE_LAYOUT_MAX_ENUM);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(pCB, imgpair, node, VK_IMAGE_ASPECT_METADATA_BIT);
    if (node.layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {image, false, VkImageSubresource()};
        auto imgsubIt = pCB->imageLayoutMap.find(imgpair);
        if (imgsubIt == pCB->imageLayoutMap.end())
            return false;
        node = imgsubIt->second;
    }
    return true;
}
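
// Note: the per-aspect probes above mean a layout recorded for a specific aspect (color, depth,
// stencil, or metadata) takes precedence; the whole-image entry ({image, false, ...}) is only
// consulted as a fallback when no per-aspect layout was found.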

// Find layout(s) on the global level
bool FindLayout(const layer_data *my_data, ImageSubresourcePair imgpair, VkImageLayout &layout) {
    layout = VK_IMAGE_LAYOUT_MAX_ENUM;
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    FindLayout(my_data, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
    if (layout == VK_IMAGE_LAYOUT_MAX_ENUM) {
        imgpair = {imgpair.image, false, VkImageSubresource()};
        auto imgsubIt = my_data->imageLayoutMap.find(imgpair);
        if (imgsubIt == my_data->imageLayoutMap.end())
            return false;
        layout = imgsubIt->second.layout;
    }
    return true;
}

bool FindLayout(const layer_data *my_data, VkImage image, VkImageSubresource range, VkImageLayout &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    return FindLayout(my_data, imgpair, layout);
}

bool FindLayouts(const layer_data *my_data, VkImage image, std::vector<VkImageLayout> &layouts) {
    auto sub_data = my_data->imageSubresourceMap.find(image);
    if (sub_data == my_data->imageSubresourceMap.end())
        return false;
    auto image_state = getImageState(my_data, image);
    if (!image_state)
        return false;
    bool ignoreGlobal = false;
    // TODO: Make this robust for >1 aspect mask. Now it will just say ignore
    // potential errors in this case.
    if (sub_data->second.size() >= (image_state->createInfo.arrayLayers * image_state->createInfo.mipLevels + 1)) {
        ignoreGlobal = true;
    }
    for (auto imgsubpair : sub_data->second) {
        if (ignoreGlobal && !imgsubpair.hasSubresource)
            continue;
        auto img_data = my_data->imageLayoutMap.find(imgsubpair);
        if (img_data != my_data->imageLayoutMap.end()) {
            layouts.push_back(img_data->second.layout);
        }
    }
    return true;
}

// Set the layout on the global level
void SetLayout(layer_data *my_data, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    VkImage &image = imgpair.image;
    // TODO (mlentine): Maybe set format if new? Not used atm.
    my_data->imageLayoutMap[imgpair].layout = layout;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource = std::find(my_data->imageSubresourceMap[image].begin(), my_data->imageSubresourceMap[image].end(), imgpair);
    if (subresource == my_data->imageSubresourceMap[image].end()) {
        my_data->imageSubresourceMap[image].push_back(imgpair);
    }
}

// Set the layout on the cmdbuf level
void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const IMAGE_CMD_BUF_LAYOUT_NODE &node) {
    pCB->imageLayoutMap[imgpair] = node;
    // TODO (mlentine): Maybe make vector a set?
    auto subresource =
        std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair);
    if (subresource == pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageSubresourceMap[imgpair.image].push_back(imgpair);
    }
}

void SetLayout(GLOBAL_CB_NODE *pCB, ImageSubresourcePair imgpair, const VkImageLayout &layout) {
    // TODO (mlentine): Maybe make vector a set?
    if (std::find(pCB->imageSubresourceMap[imgpair.image].begin(), pCB->imageSubresourceMap[imgpair.image].end(), imgpair) !=
        pCB->imageSubresourceMap[imgpair.image].end()) {
        pCB->imageLayoutMap[imgpair].layout = layout;
    } else {
        // TODO (mlentine): Could be expensive and might need to be removed.
        assert(imgpair.hasSubresource);
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(pCB, imgpair.image, imgpair.subresource, node)) {
            node.initialLayout = layout;
        }
        SetLayout(pCB, imgpair, {node.initialLayout, layout});
    }
}
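
// Note: the first time a subresource is touched in a command buffer, the layout it is expected to
// already be in is recorded as initialLayout; this lets submit-time validation compare the command
// buffer's assumptions against the image's actual layout without replaying every command.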

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, ImageSubresourcePair imgpair, const LAYOUT &layout, VkImageAspectFlags aspectMask) {
    if (imgpair.subresource.aspectMask & aspectMask) {
        imgpair.subresource.aspectMask = aspectMask;
        SetLayout(pObject, imgpair, layout);
    }
}

template <class OBJECT, class LAYOUT>
void SetLayout(OBJECT *pObject, VkImage image, VkImageSubresource range, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, true, range};
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_COLOR_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_DEPTH_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_STENCIL_BIT);
    SetLayout(pObject, imgpair, layout, VK_IMAGE_ASPECT_METADATA_BIT);
}

template <class OBJECT, class LAYOUT> void SetLayout(OBJECT *pObject, VkImage image, const LAYOUT &layout) {
    ImageSubresourcePair imgpair = {image, false, VkImageSubresource()};
    SetLayout(pObject, image, imgpair, layout);
}

void SetLayout(const layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkImageView imageView, const VkImageLayout &layout) {
    auto view_state = getImageViewState(dev_data, imageView);
    assert(view_state);
    auto image = view_state->create_info.image;
    const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
    // TODO: Do not iterate over every possibility - consolidate where possible
    for (uint32_t j = 0; j < subRange.levelCount; j++) {
        uint32_t level = subRange.baseMipLevel + j;
        for (uint32_t k = 0; k < subRange.layerCount; k++) {
            uint32_t layer = subRange.baseArrayLayer + k;
            VkImageSubresource sub = {subRange.aspectMask, level, layer};
            // TODO: If ImageView was created with depth or stencil, transition both layouts as
            // the aspectMask is ignored and both are used. Verify that the extra implicit layout
            // is OK for descriptor set layout validation
            if (subRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (vk_format_is_depth_and_stencil(view_state->create_info.format)) {
                    sub.aspectMask |= (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT);
                }
            }
            SetLayout(pCB, image, sub, layout);
        }
    }
}

// Validate that given set is valid and that it's not being used by an in-flight CmdBuffer
// func_str is the name of the calling function
// Return false if no errors occur
// Return true if validation error occurs and callback returns true (to skip upcoming API call down the chain)
static bool validateIdleDescriptorSet(const layer_data *dev_data, VkDescriptorSet set, std::string func_str) {
    if (dev_data->instance_data->disabled.idle_descriptor_set)
        return false;
    bool skip_call = false;
    auto set_node = dev_data->setMap.find(set);
    if (set_node == dev_data->setMap.end()) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                             (uint64_t)(set), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that has not been allocated.",
                             func_str.c_str(), (uint64_t)(set));
    } else {
        // TODO : This covers various error cases so should pass error enum into this function and use passed in enum here
        if (set_node->second->in_use.load()) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        (uint64_t)(set), __LINE__, VALIDATION_ERROR_00919, "DS",
                        "Cannot call %s() on descriptor set 0x%" PRIxLEAST64 " that is in use by a command buffer. %s",
                        func_str.c_str(), (uint64_t)(set), validation_error_map[VALIDATION_ERROR_00919]);
        }
    }
    return skip_call;
}
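
// Sketch of a typical caller (illustrative; the real entry points appear elsewhere in this file):
// vkFreeDescriptorSets() would run this check on each set before releasing it:
//     for (uint32_t i = 0; i < count; i++) {
//         skip_call |= validateIdleDescriptorSet(dev_data, pDescriptorSets[i], "vkFreeDescriptorSets");
//     }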

// Remove set from setMap and delete the set
static void freeDescriptorSet(layer_data *dev_data, cvdescriptorset::DescriptorSet *descriptor_set) {
    dev_data->setMap.erase(descriptor_set->GetSet());
    delete descriptor_set;
}
// Free all DS Pools including their Sets & related sub-structs
// NOTE : Calls to this function should be wrapped in mutex
static void deletePools(layer_data *my_data) {
    if (my_data->descriptorPoolMap.empty())
        return;
    for (auto ii = my_data->descriptorPoolMap.begin(); ii != my_data->descriptorPoolMap.end(); ++ii) {
        // Remove this pool's sets from setMap and delete them
        for (auto ds : (*ii).second->sets) {
            freeDescriptorSet(my_data, ds);
        }
        (*ii).second->sets.clear();
    }
    my_data->descriptorPoolMap.clear();
}

static void clearDescriptorPool(layer_data *my_data, const VkDevice device, const VkDescriptorPool pool,
                                VkDescriptorPoolResetFlags flags) {
    DESCRIPTOR_POOL_STATE *pPool = getDescriptorPoolState(my_data, pool);
    // TODO: validate flags
    // For every set off of this pool, clear it, remove from setMap, and free cvdescriptorset::DescriptorSet
    for (auto ds : pPool->sets) {
        freeDescriptorSet(my_data, ds);
    }
    pPool->sets.clear();
    // Reset available count for each type and available sets for this pool
    for (uint32_t i = 0; i < pPool->availableDescriptorTypeCount.size(); ++i) {
        pPool->availableDescriptorTypeCount[i] = pPool->maxDescriptorTypeCount[i];
    }
    pPool->availableSets = pPool->maxSets;
}

// For given CB object, fetch associated CB Node from map
static GLOBAL_CB_NODE *getCBNode(layer_data const *my_data, const VkCommandBuffer cb) {
    auto it = my_data->commandBufferMap.find(cb);
    if (it == my_data->commandBufferMap.end()) {
        log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                reinterpret_cast<const uint64_t &>(cb), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                "Attempt to use CommandBuffer 0x%p that doesn't exist!", cb);
        return NULL;
    }
    return it->second;
}
// Free all CB Nodes
// NOTE : Calls to this function should be wrapped in mutex
static void deleteCommandBuffers(layer_data *my_data) {
    if (my_data->commandBufferMap.empty()) {
        return;
    }
    for (auto ii = my_data->commandBufferMap.begin(); ii != my_data->commandBufferMap.end(); ++ii) {
        delete (*ii).second;
    }
    my_data->commandBufferMap.clear();
}

static bool report_error_no_cb_begin(const layer_data *dev_data, const VkCommandBuffer cb, const char *caller_name) {
    return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                   (uint64_t)cb, __LINE__, DRAWSTATE_NO_BEGIN_COMMAND_BUFFER, "DS",
                   "You must call vkBeginCommandBuffer() before this call to %s", caller_name);
}

// If a renderpass is active, verify that the given command type is appropriate for current subpass state
bool ValidateCmdSubpassState(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd_type) {
    if (!pCB->activeRenderPass)
        return false;
    bool skip_call = false;
    if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS &&
        (cmd_type != CMD_EXECUTECOMMANDS && cmd_type != CMD_NEXTSUBPASS && cmd_type != CMD_ENDRENDERPASS)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "Commands cannot be called in a subpass using secondary command buffers.");
    } else if (pCB->activeSubpassContents == VK_SUBPASS_CONTENTS_INLINE && cmd_type == CMD_EXECUTECOMMANDS) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                             "vkCmdExecuteCommands() cannot be called in a subpass using inline commands.");
    }
    return skip_call;
}
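
// Example of the rule above: in a subpass begun with VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
// only vkCmdExecuteCommands(), vkCmdNextSubpass(), and vkCmdEndRenderPass() are permitted, while a
// subpass begun with VK_SUBPASS_CONTENTS_INLINE rejects vkCmdExecuteCommands().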

static bool checkGraphicsBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_GRAPHICS_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics capabilities.", name);
    return false;
}

static bool checkComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!(flags & VK_QUEUE_COMPUTE_BIT))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without compute capabilities.", name);
    return false;
}

static bool checkGraphicsOrComputeBit(const layer_data *my_data, VkQueueFlags flags, const char *name) {
    if (!((flags & VK_QUEUE_GRAPHICS_BIT) || (flags & VK_QUEUE_COMPUTE_BIT)))
        return log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                       DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                       "Cannot call %s on a command buffer allocated from a pool without graphics or compute capabilities.",
                       name);
    return false;
}
3746
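// Example (hedged): a command buffer allocated from a pool whose queue family reports only
// VK_QUEUE_TRANSFER_BIT fails checkGraphicsBit() for draws and checkComputeBit() for
// dispatches; pure transfer commands such as CMD_COPYBUFFER deliberately fall through
// ValidateCmd() below with no capability check.
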
// Validate the given command being added to the specified cmd buffer, flagging errors if CB is not
// in the recording state or if there's an issue with the Cmd ordering
static bool ValidateCmd(layer_data *my_data, GLOBAL_CB_NODE *pCB, const CMD_TYPE cmd, const char *caller_name) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(my_data, pCB->createInfo.commandPool);
    if (pPool) {
        VkQueueFlags flags = my_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].queueFlags;
        switch (cmd) {
        case CMD_BINDPIPELINE:
        case CMD_BINDPIPELINEDELTA:
        case CMD_BINDDESCRIPTORSETS:
        case CMD_FILLBUFFER:
        case CMD_CLEARCOLORIMAGE:
        case CMD_SETEVENT:
        case CMD_RESETEVENT:
        case CMD_WAITEVENTS:
        case CMD_BEGINQUERY:
        case CMD_ENDQUERY:
        case CMD_RESETQUERYPOOL:
        case CMD_COPYQUERYPOOLRESULTS:
        case CMD_WRITETIMESTAMP:
            skip_call |= checkGraphicsOrComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_SETVIEWPORTSTATE:
        case CMD_SETSCISSORSTATE:
        case CMD_SETLINEWIDTHSTATE:
        case CMD_SETDEPTHBIASSTATE:
        case CMD_SETBLENDSTATE:
        case CMD_SETDEPTHBOUNDSSTATE:
        case CMD_SETSTENCILREADMASKSTATE:
        case CMD_SETSTENCILWRITEMASKSTATE:
        case CMD_SETSTENCILREFERENCESTATE:
        case CMD_BINDINDEXBUFFER:
        case CMD_BINDVERTEXBUFFER:
        case CMD_DRAW:
        case CMD_DRAWINDEXED:
        case CMD_DRAWINDIRECT:
        case CMD_DRAWINDEXEDINDIRECT:
        case CMD_BLITIMAGE:
        case CMD_CLEARATTACHMENTS:
        case CMD_CLEARDEPTHSTENCILIMAGE:
        case CMD_RESOLVEIMAGE:
        case CMD_BEGINRENDERPASS:
        case CMD_NEXTSUBPASS:
        case CMD_ENDRENDERPASS:
            skip_call |= checkGraphicsBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_DISPATCH:
        case CMD_DISPATCHINDIRECT:
            skip_call |= checkComputeBit(my_data, flags, cmdTypeToString(cmd).c_str());
            break;
        case CMD_COPYBUFFER:
        case CMD_COPYIMAGE:
        case CMD_COPYBUFFERTOIMAGE:
        case CMD_COPYIMAGETOBUFFER:
        case CMD_CLONEIMAGEDATA:
        case CMD_UPDATEBUFFER:
        case CMD_PIPELINEBARRIER:
        case CMD_EXECUTECOMMANDS:
        case CMD_END:
            break;
        default:
            break;
        }
    }
    if (pCB->state != CB_RECORDING) {
        skip_call |= report_error_no_cb_begin(my_data, pCB->commandBuffer, caller_name);
    } else {
        skip_call |= ValidateCmdSubpassState(my_data, pCB, cmd);
    }
    return skip_call;
}

static void UpdateCmdBufferLastCmd(layer_data *my_data, GLOBAL_CB_NODE *cb_state, const CMD_TYPE cmd) {
    if (cb_state->state == CB_RECORDING) {
        cb_state->last_cmd = cmd;
    }
}
// For a given object struct, return a BASE_NODE pointer to its wrapping state struct
BASE_NODE *GetStateStructPtrFromObject(layer_data *dev_data, VK_OBJECT object_struct) {
    BASE_NODE *base_ptr = nullptr;
    switch (object_struct.type) {
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
        base_ptr = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
        base_ptr = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
        base_ptr = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
        base_ptr = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
        base_ptr = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
        base_ptr = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
        base_ptr = getImageState(dev_data, reinterpret_cast<VkImage &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
        base_ptr = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
        base_ptr = getEventNode(dev_data, reinterpret_cast<VkEvent &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
        base_ptr = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
        base_ptr = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
        base_ptr = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
        base_ptr = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(object_struct.handle));
        break;
    }
    case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
        base_ptr = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(object_struct.handle));
        break;
    }
    default:
        // TODO : Any other objects to be handled here?
        assert(0);
        break;
    }
    return base_ptr;
}

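// Usage note: this helper lets generic bookkeeping such as removeCommandBufferBinding()
// and DecrementBoundResources() below operate on every tracked handle type through the
// common BASE_NODE fields (the in_use counter and the cb_bindings set) instead of
// special-casing each VkDebugReportObjectTypeEXT at every call site.
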
// Tie the VK_OBJECT to the cmd buffer, which includes:
//  Adding the object_binding to the cmd buffer
//  Adding the cb_binding to the object
static void addCommandBufferBinding(std::unordered_set<GLOBAL_CB_NODE *> *cb_bindings, VK_OBJECT obj, GLOBAL_CB_NODE *cb_node) {
    cb_bindings->insert(cb_node);
    cb_node->object_bindings.insert(obj);
}
// For a given object, if cb_node is in that object's cb_bindings, remove cb_node
static void removeCommandBufferBinding(layer_data *dev_data, VK_OBJECT const *object, GLOBAL_CB_NODE *cb_node) {
    BASE_NODE *base_obj = GetStateStructPtrFromObject(dev_data, *object);
    if (base_obj)
        base_obj->cb_bindings.erase(cb_node);
}
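
// Lifecycle sketch (assumed typical flow, not shown here): state-binding entry points
// call addCommandBufferBinding(&object_state->cb_bindings, obj, cb_node) while recording
// so that destroying the object later can invalidate every bound command buffer;
// resetCB() below walks pCB->object_bindings and undoes each link via
// removeCommandBufferBinding().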
// Reset the command buffer state
// Maintain the createInfo and set state to CB_NEW, but clear all other state
static void resetCB(layer_data *dev_data, const VkCommandBuffer cb) {
    GLOBAL_CB_NODE *pCB = dev_data->commandBufferMap[cb];
    if (pCB) {
        pCB->in_use.store(0);
        pCB->last_cmd = CMD_NONE;
        // Reset CB state (note that createInfo is not cleared)
        pCB->commandBuffer = cb;
        memset(&pCB->beginInfo, 0, sizeof(VkCommandBufferBeginInfo));
        memset(&pCB->inheritanceInfo, 0, sizeof(VkCommandBufferInheritanceInfo));
        pCB->numCmds = 0;
        memset(pCB->drawCount, 0, NUM_DRAW_TYPES * sizeof(uint64_t));
        pCB->state = CB_NEW;
        pCB->submitCount = 0;
        pCB->status = 0;
        pCB->viewportMask = 0;
        pCB->scissorMask = 0;

        for (uint32_t i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; ++i) {
            pCB->lastBound[i].reset();
        }

        memset(&pCB->activeRenderPassBeginInfo, 0, sizeof(pCB->activeRenderPassBeginInfo));
        pCB->activeRenderPass = nullptr;
        pCB->activeSubpassContents = VK_SUBPASS_CONTENTS_INLINE;
        pCB->activeSubpass = 0;
        pCB->broken_bindings.clear();
        pCB->waitedEvents.clear();
        pCB->events.clear();
        pCB->writeEventsBeforeWait.clear();
        pCB->waitedEventsBeforeQueryReset.clear();
        pCB->queryToStateMap.clear();
        pCB->activeQueries.clear();
        pCB->startedQueries.clear();
        pCB->imageSubresourceMap.clear();
        pCB->imageLayoutMap.clear();
        pCB->eventToStageMap.clear();
        pCB->drawData.clear();
        pCB->currentDrawData.buffers.clear();
        pCB->vertex_buffer_used = false;
        pCB->primaryCommandBuffer = VK_NULL_HANDLE;
        // Make sure any secondaryCommandBuffers are removed from globalInFlight
        for (auto secondary_cb : pCB->secondaryCommandBuffers) {
            dev_data->globalInFlightCmdBuffers.erase(secondary_cb);
        }
        pCB->secondaryCommandBuffers.clear();
        pCB->updateImages.clear();
        pCB->updateBuffers.clear();
        clear_cmd_buf_and_mem_references(dev_data, pCB);
        pCB->eventUpdates.clear();
        pCB->queryUpdates.clear();

        // Remove object bindings
        for (auto obj : pCB->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, pCB);
        }
        pCB->object_bindings.clear();
        // Remove this cmdBuffer's reference from each FrameBuffer's CB ref list
        for (auto framebuffer : pCB->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(pCB);
        }
        pCB->framebuffers.clear();
        pCB->activeFramebuffer = VK_NULL_HANDLE;
    }
}

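// Reset triggers (hedged note): both an explicit vkResetCommandBuffer() and the implicit
// reset performed when vkBeginCommandBuffer() is called on a previously recorded command
// buffer are expected to funnel through resetCB(), returning tracked state to CB_NEW
// while preserving pCB->createInfo.
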
// Set PSO-related status bits for CB, including dynamic state set via PSO
static void set_cb_pso_status(GLOBAL_CB_NODE *pCB, const PIPELINE_STATE *pPipe) {
    // Account for any dynamic state not set via this PSO
    if (!pPipe->graphicsPipelineCI.pDynamicState ||
        !pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount) { // All state is static
        pCB->status |= CBSTATUS_ALL_STATE_SET;
    } else {
        // First consider all state on
        // Then unset any state that's noted as dynamic in PSO
        // Finally OR that into CB statemask
        CBStatusFlags psoDynStateMask = CBSTATUS_ALL_STATE_SET;
        for (uint32_t i = 0; i < pPipe->graphicsPipelineCI.pDynamicState->dynamicStateCount; i++) {
            switch (pPipe->graphicsPipelineCI.pDynamicState->pDynamicStates[i]) {
            case VK_DYNAMIC_STATE_LINE_WIDTH:
                psoDynStateMask &= ~CBSTATUS_LINE_WIDTH_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BIAS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BIAS_SET;
                break;
            case VK_DYNAMIC_STATE_BLEND_CONSTANTS:
                psoDynStateMask &= ~CBSTATUS_BLEND_CONSTANTS_SET;
                break;
            case VK_DYNAMIC_STATE_DEPTH_BOUNDS:
                psoDynStateMask &= ~CBSTATUS_DEPTH_BOUNDS_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_READ_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_WRITE_MASK:
                psoDynStateMask &= ~CBSTATUS_STENCIL_WRITE_MASK_SET;
                break;
            case VK_DYNAMIC_STATE_STENCIL_REFERENCE:
                psoDynStateMask &= ~CBSTATUS_STENCIL_REFERENCE_SET;
                break;
            default:
                // TODO : Flag error here
                break;
            }
        }
        pCB->status |= psoDynStateMask;
    }
}

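// Worked example: a pipeline whose pDynamicState lists only VK_DYNAMIC_STATE_LINE_WIDTH
// starts psoDynStateMask at CBSTATUS_ALL_STATE_SET, clears CBSTATUS_LINE_WIDTH_SET, and
// ORs the remainder into pCB->status; the line width must then be supplied at record
// time via vkCmdSetLineWidth() before any draw that consumes it.
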
// Flags validation error if the associated call is made inside a render pass. The apiName
// routine should ONLY be called outside a render pass.
static bool insideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                             UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool inside = false;
    if (pCB->activeRenderPass) {
        inside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                         (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                         "%s: It is invalid to issue this call inside an active render pass (0x%" PRIxLEAST64 "). %s", apiName,
                         (uint64_t)pCB->activeRenderPass->renderPass, validation_error_map[msgCode]);
    }
    return inside;
}

// Flags validation error if the associated call is made outside a render pass. The apiName
// routine should ONLY be called inside a render pass.
static bool outsideRenderPass(const layer_data *my_data, GLOBAL_CB_NODE *pCB, const char *apiName,
                              UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool outside = false;
    if (((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) && (!pCB->activeRenderPass)) ||
        ((pCB->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) && (!pCB->activeRenderPass) &&
         !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT))) {
        outside = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                          (uint64_t)pCB->commandBuffer, __LINE__, msgCode, "DS",
                          "%s: This call must be issued inside an active render pass. %s", apiName, validation_error_map[msgCode]);
    }
    return outside;
}

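// Example (hedged): a handler for a transfer command like vkCmdCopyBuffer(), which is
// illegal inside a render pass, would call insideRenderPass(), while a draw-command
// handler would call outsideRenderPass(). Note that a secondary CB begun with
// VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is treated as inside its inherited
// render pass even though activeRenderPass is not set on it.
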
static void init_core_validation(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "lunarg_core_validation");
}

static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, instance_layer_data *instance_data) {
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME))
            instance_data->surfaceExtensionEnabled = true;
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME))
            instance_data->displayExtensionEnabled = true;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME))
            instance_data->androidSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME))
            instance_data->mirSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME))
            instance_data->waylandSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME))
            instance_data->win32SurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME))
            instance_data->xcbSurfaceExtensionEnabled = true;
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (!strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME))
            instance_data->xlibSurfaceExtensionEnabled = true;
#endif
    }
}

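// App-side sketch (illustrative only; exts/ci/instance are placeholder names): enabling
// VK_KHR_surface so the loop above sets instance_data->surfaceExtensionEnabled.
//     const char *exts[] = {VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_KHR_SURFACE_EXTENSION_NAME};
//     VkInstanceCreateInfo ci = {};
//     ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
//     ci.enabledExtensionCount = 2;
//     ci.ppEnabledExtensionNames = exts;
//     vkCreateInstance(&ci, nullptr, &instance);
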
VKAPI_ATTR VkResult VKAPI_CALL
CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL)
        return VK_ERROR_INITIALIZATION_FAILED;

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS)
        return result;

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
    checkInstanceRegisterExtensions(pCreateInfo, instance_data);
    init_core_validation(instance_data, pAllocator);

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

// Hook DestroyInstance to clean up this layer's per-instance state
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    dispatch_key key = get_dispatch_key(instance);
    // TBD: Need any locking this early, in case this function is called at the
    // same time by more than one thread?
    instance_layer_data *instance_data = get_my_data_ptr(key, instance_layer_data_map);
    instance_data->dispatch_table.DestroyInstance(instance, pAllocator);

    std::lock_guard<std::mutex> lock(global_lock);
    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    // The instance's state lives in instance_layer_data_map (not the device map), so erase it there
    instance_layer_data_map.erase(key);
}

static void checkDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    // TBD: Need any locking, in case this function is called at the same time
    // by more than one thread?
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->device_extensions.wsi_enabled = false;
    dev_data->device_extensions.wsi_display_swapchain_enabled = false;

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_enabled = true;
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME) == 0)
            dev_data->device_extensions.wsi_display_swapchain_enabled = true;
    }
}

// Verify that the requested queue families have been properly queried and are valid
static bool ValidateRequestedQueueFamilyProperties(instance_layer_data *instance_data, VkPhysicalDevice gpu,
                                                   const VkDeviceCreateInfo *create_info) {
    bool skip_call = false;
    auto physical_device_state = getPhysicalDeviceState(instance_data, gpu);
    // First check: has the app actually requested queueFamilyProperties?
    if (!physical_device_state) {
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                             0, __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                             "Invalid call to vkCreateDevice() w/o first calling vkEnumeratePhysicalDevices().");
    } else if (QUERY_DETAILS != physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
        // TODO: This is not called out as an invalid use in the spec so make more informative recommendation.
        skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST,
                             "DL", "Call to vkCreateDevice() w/o first calling vkGetPhysicalDeviceQueueFamilyProperties().");
    } else {
        // Check that the requested queue properties are valid
        for (uint32_t i = 0; i < create_info->queueCreateInfoCount; i++) {
            uint32_t requestedIndex = create_info->pQueueCreateInfos[i].queueFamilyIndex;
            if (requestedIndex >= physical_device_state->queue_family_properties.size()) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                    __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                    "Invalid queue create request in vkCreateDevice(). Invalid queueFamilyIndex %u requested.", requestedIndex);
            } else if (create_info->pQueueCreateInfos[i].queueCount >
                       physical_device_state->queue_family_properties[requestedIndex].queueCount) {
                skip_call |=
                    log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                            0, __LINE__, DEVLIMITS_INVALID_QUEUE_CREATE_REQUEST, "DL",
                            "Invalid queue create request in vkCreateDevice(). QueueFamilyIndex %u only has %u queues, but "
                            "requested queueCount is %u.",
                            requestedIndex, physical_device_state->queue_family_properties[requestedIndex].queueCount,
                            create_info->pQueueCreateInfos[i].queueCount);
            }
        }
    }
    return skip_call;
}

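// Worked example: if vkGetPhysicalDeviceQueueFamilyProperties() reported two families
// and family 0 exposes a single queue, then a VkDeviceQueueCreateInfo with
// queueFamilyIndex == 2 trips the out-of-range error above, while queueFamilyIndex == 0
// with queueCount == 4 trips the queueCount error.
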
// Verify that features have been queried and that they are available
static bool ValidateRequestedFeatures(instance_layer_data *dev_data, VkPhysicalDevice phys,
                                      const VkPhysicalDeviceFeatures *requested_features) {
    bool skip_call = false;

    auto phys_device_state = getPhysicalDeviceState(dev_data, phys);
    const VkBool32 *actual = reinterpret_cast<VkBool32 *>(&phys_device_state->features);
    const VkBool32 *requested = reinterpret_cast<const VkBool32 *>(requested_features);
    // TODO : This is a nice, compact way to loop through the struct, but a bad way to report issues
    //  Need to provide the struct member name with the issue. To do that seems like we'll
    //  have to loop through each struct member, which should be done w/ codegen to keep in sync.
    uint32_t errors = 0;
    uint32_t total_bools = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
    for (uint32_t i = 0; i < total_bools; i++) {
        if (requested[i] > actual[i]) {
            // TODO: Add index-to-struct-member-name helper to be able to include a feature name
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                                 "DL", "While calling vkCreateDevice(), requesting feature #%u in VkPhysicalDeviceFeatures struct, "
                                 "which is not available on this device.",
                                 i);
            errors++;
        }
    }
    if (errors && (UNCALLED == phys_device_state->vkGetPhysicalDeviceFeaturesState)) {
        // If user didn't request features, notify them that they should
        // TODO: Verify this against the spec. I believe this is an invalid use of the API and should return an error
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                             VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_INVALID_FEATURE_REQUESTED,
                             "DL", "You requested features that are unavailable on this device. You should first query feature "
                             "availability by calling vkGetPhysicalDeviceFeatures().");
    }
    return skip_call;
}

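// Failing pattern this catches (app-side sketch; wanted/device_ci are placeholder names):
//     VkPhysicalDeviceFeatures wanted = {};
//     wanted.geometryShader = VK_TRUE;        // device actually reports VK_FALSE
//     device_ci.pEnabledFeatures = &wanted;   // --> "requesting feature #N" error above
// The VkBool32-array walk is valid because VkPhysicalDeviceFeatures consists solely of
// VkBool32 members, so index i in the requested array lines up with index i in the
// device's actual features.
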
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), instance_layer_data_map);
    bool skip_call = false;

    // Check that any requested features are available
    if (pCreateInfo->pEnabledFeatures) {
        skip_call |= ValidateRequestedFeatures(my_instance_data, gpu, pCreateInfo->pEnabledFeatures);
    }
    skip_call |= ValidateRequestedQueueFamilyProperties(my_instance_data, gpu, pCreateInfo);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);

    my_device_data->instance_data = my_instance_data;
    // Setup device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);
    my_device_data->device = *pDevice;
    // Save PhysicalDevice handle
    my_device_data->physical_device = gpu;

    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
    checkDeviceRegisterExtensions(pCreateInfo, *pDevice);
    // Get physical device limits for this device
    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &(my_device_data->phys_dev_properties.properties));
    uint32_t count;
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(gpu, &count, nullptr);
    my_device_data->phys_dev_properties.queue_family_properties.resize(count);
    my_instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(
        gpu, &count, &my_device_data->phys_dev_properties.queue_family_properties[0]);
    // TODO: device limits should make sure these are compatible
    if (pCreateInfo->pEnabledFeatures) {
        my_device_data->enabled_features = *pCreateInfo->pEnabledFeatures;
    } else {
        memset(&my_device_data->enabled_features, 0, sizeof(VkPhysicalDeviceFeatures));
    }
    // Store physical device properties and physical device mem limits into device layer_data structs
    my_instance_data->dispatch_table.GetPhysicalDeviceMemoryProperties(gpu, &my_device_data->phys_dev_mem_props);
    my_instance_data->dispatch_table.GetPhysicalDeviceProperties(gpu, &my_device_data->phys_dev_props);
    lock.unlock();

    ValidateLayerOrdering(*pCreateInfo);

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    // TODOSC : Shouldn't need any customization here
    bool skip = false;
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
    // Free all the memory
    std::unique_lock<std::mutex> lock(global_lock);
    deletePipelines(dev_data);
    dev_data->renderPassMap.clear();
    deleteCommandBuffers(dev_data);
    // This will also delete all sets in the pool & remove them from setMap
    deletePools(dev_data);
    // All sets should be removed
    assert(dev_data->setMap.empty());
    for (auto del_layout : dev_data->descriptorSetLayoutMap) {
        delete del_layout.second;
    }
    dev_data->descriptorSetLayoutMap.clear();
    dev_data->imageViewMap.clear();
    dev_data->imageMap.clear();
    dev_data->imageSubresourceMap.clear();
    dev_data->imageLayoutMap.clear();
    dev_data->bufferViewMap.clear();
    dev_data->bufferMap.clear();
    // Queues persist until device is destroyed
    dev_data->queueMap.clear();
    // Report any memory leaks
    layer_debug_report_destroy_device(device);
    lock.unlock();

#if DISPATCH_MAP_DEBUG
    fprintf(stderr, "Device: 0x%p, key: 0x%p\n", device, key);
#endif
    if (!skip) {
        dev_data->dispatch_table.DestroyDevice(device, pAllocator);
        layer_data_map.erase(key);
    }
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION}};

// This validates that the initial layout specified in the command buffer for the IMAGE
// is the same as the global IMAGE layout
static bool ValidateCmdBufImageLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    for (auto cb_image_data : pCB->imageLayoutMap) {
        VkImageLayout imageLayout;
        if (!FindLayout(dev_data, cb_image_data.first, imageLayout)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot submit cmd buffer using deleted image 0x%" PRIx64 ".",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first));
        } else {
            if (cb_image_data.second.initialLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
                // TODO: Set memory invalid which is in mem_tracker currently
            } else if (imageLayout != cb_image_data.second.initialLayout) {
                if (cb_image_data.first.hasSubresource) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") [sub-resource: aspectMask 0x%X array layer %u, mip level %u], "
                        "with layout %s when first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), cb_image_data.first.subresource.aspectMask,
                        cb_image_data.first.subresource.arrayLayer,
                        cb_image_data.first.subresource.mipLevel, string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                } else {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "Cannot submit cmd buffer using image (0x%" PRIx64 ") with layout %s when "
                        "first use is %s.",
                        reinterpret_cast<const uint64_t &>(cb_image_data.first.image), string_VkImageLayout(imageLayout),
                        string_VkImageLayout(cb_image_data.second.initialLayout));
                }
            }
            SetLayout(dev_data, cb_image_data.first, cb_image_data.second.layout);
        }
    }
    return skip_call;
}

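// Example mismatch (hedged): a command buffer recorded assuming an image begins in
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, submitted while the globally tracked layout
// is still VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL (say, a missing barrier between a
// render pass and a sampling pass), produces the "first use" message above at submit time.
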
// Loop through bound objects and increment their in_use counts
// For any unknown objects, flag an error
static bool ValidateAndIncrementBoundObjects(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    bool skip = false;
    DRAW_STATE_ERROR error_code = DRAWSTATE_NONE;
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT: {
            base_obj = getSetNode(dev_data, reinterpret_cast<VkDescriptorSet &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_SET;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT: {
            base_obj = getSamplerState(dev_data, reinterpret_cast<VkSampler &>(obj.handle));
            error_code = DRAWSTATE_INVALID_SAMPLER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT: {
            base_obj = getQueryPoolNode(dev_data, reinterpret_cast<VkQueryPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_QUERY_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT: {
            base_obj = getPipelineState(dev_data, reinterpret_cast<VkPipeline &>(obj.handle));
            error_code = DRAWSTATE_INVALID_PIPELINE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            base_obj = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT: {
            base_obj = getBufferViewState(dev_data, reinterpret_cast<VkBufferView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_BUFFER_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            base_obj = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT: {
            base_obj = getImageViewState(dev_data, reinterpret_cast<VkImageView &>(obj.handle));
            error_code = DRAWSTATE_INVALID_IMAGE_VIEW;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT: {
            base_obj = getEventNode(dev_data, reinterpret_cast<VkEvent &>(obj.handle));
            error_code = DRAWSTATE_INVALID_EVENT;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT: {
            base_obj = getDescriptorPoolState(dev_data, reinterpret_cast<VkDescriptorPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DESCRIPTOR_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT: {
            base_obj = getCommandPoolNode(dev_data, reinterpret_cast<VkCommandPool &>(obj.handle));
            error_code = DRAWSTATE_INVALID_COMMAND_POOL;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT: {
            base_obj = getFramebufferState(dev_data, reinterpret_cast<VkFramebuffer &>(obj.handle));
            error_code = DRAWSTATE_INVALID_FRAMEBUFFER;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT: {
            base_obj = getRenderPassState(dev_data, reinterpret_cast<VkRenderPass &>(obj.handle));
            error_code = DRAWSTATE_INVALID_RENDERPASS;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT: {
            base_obj = getMemObjInfo(dev_data, reinterpret_cast<VkDeviceMemory &>(obj.handle));
            error_code = DRAWSTATE_INVALID_DEVICE_MEMORY;
            break;
        }
        default:
            // TODO : Merge handling of other object types into this code
            break;
        }
        if (!base_obj) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj.type, obj.handle, __LINE__, error_code, "DS",
                        "Cannot submit cmd buffer using deleted %s 0x%" PRIx64 ".", object_type_to_string(obj.type), obj.handle);
        } else {
            base_obj->in_use.fetch_add(1);
        }
    }
    return skip;
}

// Track which resources are in-flight by atomically incrementing their "in_use" count
static bool validateAndIncrementResources(layer_data *dev_data, GLOBAL_CB_NODE *cb_node) {
    bool skip_call = false;

    cb_node->in_use.fetch_add(1);
    dev_data->globalInFlightCmdBuffers.insert(cb_node->commandBuffer);

    // First increment for all "generic" objects bound to cmd buffer, followed by special-case objects below
    skip_call |= ValidateAndIncrementBoundObjects(dev_data, cb_node);
    // TODO : We should be able to remove the NULL look-up checks from the code below as long as
    //  all the corresponding cases are verified to cause CB_INVALID state and the CB_INVALID state
    //  is then flagged prior to calling this function
    for (auto drawDataElement : cb_node->drawData) {
        for (auto buffer : drawDataElement.buffers) {
            auto buffer_state = getBufferState(dev_data, buffer);
            if (!buffer_state) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                     (uint64_t)(buffer), __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                     "Cannot submit cmd buffer using deleted buffer 0x%" PRIx64 ".", (uint64_t)(buffer));
            } else {
                buffer_state->in_use.fetch_add(1);
            }
        }
    }
    for (auto event : cb_node->writeEventsBeforeWait) {
        auto event_state = getEventNode(dev_data, event);
        if (event_state)
            event_state->write_in_use++;
    }
    return skip_call;
}

// Note: This function assumes that the global lock is held by the calling thread.
// For the given queue, verify the queue state up to the given seq number.
// Currently the only check is that if there are events to be waited on prior to
// a QueryReset, all such events have been signalled.
static bool VerifyQueueStateToSeq(layer_data *dev_data, QUEUE_STATE *queue, uint64_t seq) {
    bool skip = false;
    auto queue_seq = queue->seq;
    std::unordered_map<VkQueue, uint64_t> other_queue_seqs;
    auto sub_it = queue->submissions.begin();
    while (queue_seq < seq) {
        for (auto &wait : sub_it->waitSemaphores) {
            auto &last_seq = other_queue_seqs[wait.queue];
            last_seq = std::max(last_seq, wait.seq);
        }
        for (auto cb : sub_it->cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (cb_node) {
                for (auto queryEventsPair : cb_node->waitedEventsBeforeQueryReset) {
                    for (auto event : queryEventsPair.second) {
                        if (dev_data->eventMap[event].needsSignaled) {
                            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, 0, DRAWSTATE_INVALID_QUERY, "DS",
                                            "Cannot get query results on queryPool 0x%" PRIx64
                                            " with index %d which was guarded by unsignaled event 0x%" PRIx64 ".",
                                            (uint64_t)(queryEventsPair.first.pool), queryEventsPair.first.index, (uint64_t)(event));
                        }
                    }
                }
            }
        }
        sub_it++;
        queue_seq++;
    }
    for (auto qs : other_queue_seqs) {
        skip |= VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
    return skip;
}

// When the given fence is retired, verify outstanding queue operations through the point of the fence
static bool VerifyQueueStateToFence(layer_data *dev_data, VkFence fence) {
    auto fence_state = getFenceNode(dev_data, fence);
    if (VK_NULL_HANDLE != fence_state->signaler.first) {
        return VerifyQueueStateToSeq(dev_data, getQueueState(dev_data, fence_state->signaler.first), fence_state->signaler.second);
    }
    return false;
}

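// Sequencing model note (summarizing the scheme used here and in RetireWorkOnQueue
// below): each QUEUE_STATE keeps a monotonically increasing 'seq' of retired submissions
// plus a deque of pending ones, so "verify/retire up to seq N" walks the deque in order;
// cross-queue semaphore waits are handled by recursing into the waited-on queue up to
// the seq it had reached at signal time.
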
// TODO: nuke this completely.
// Decrement cmd_buffer in_use and if it goes to 0 remove cmd_buffer from globalInFlightCmdBuffers
static inline void removeInFlightCmdBuffer(layer_data *dev_data, VkCommandBuffer cmd_buffer) {
    // Pull it off of global list initially, but if we find it in any other queue list, add it back in
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmd_buffer);
    pCB->in_use.fetch_sub(1);
    if (!pCB->in_use.load()) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

// Decrement in-use count for objects bound to command buffer
static void DecrementBoundResources(layer_data *dev_data, GLOBAL_CB_NODE const *cb_node) {
    BASE_NODE *base_obj = nullptr;
    for (auto obj : cb_node->object_bindings) {
        base_obj = GetStateStructPtrFromObject(dev_data, obj);
        if (base_obj) {
            base_obj->in_use.fetch_sub(1);
        }
    }
}

static void RetireWorkOnQueue(layer_data *dev_data, QUEUE_STATE *pQueue, uint64_t seq) {
    std::unordered_map<VkQueue, uint64_t> otherQueueSeqs;

    // Roll this queue forward, one submission at a time.
    while (pQueue->seq < seq) {
        auto &submission = pQueue->submissions.front();

        for (auto &wait : submission.waitSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, wait.semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
            auto &lastSeq = otherQueueSeqs[wait.queue];
            lastSeq = std::max(lastSeq, wait.seq);
        }

        for (auto &semaphore : submission.signalSemaphores) {
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                pSemaphore->in_use.fetch_sub(1);
            }
        }

        for (auto cb : submission.cbs) {
            auto cb_node = getCBNode(dev_data, cb);
            if (!cb_node) {
                continue;
            }
            // First perform decrement on general case bound objects
            DecrementBoundResources(dev_data, cb_node);
            for (auto drawDataElement : cb_node->drawData) {
                for (auto buffer : drawDataElement.buffers) {
                    auto buffer_state = getBufferState(dev_data, buffer);
                    if (buffer_state) {
                        buffer_state->in_use.fetch_sub(1);
                    }
                }
            }
            for (auto event : cb_node->writeEventsBeforeWait) {
                auto eventNode = dev_data->eventMap.find(event);
                if (eventNode != dev_data->eventMap.end()) {
                    eventNode->second.write_in_use--;
                }
            }
            for (auto queryStatePair : cb_node->queryToStateMap) {
                dev_data->queryToStateMap[queryStatePair.first] = queryStatePair.second;
            }
            for (auto eventStagePair : cb_node->eventToStageMap) {
                dev_data->eventMap[eventStagePair.first].stageMask = eventStagePair.second;
            }

            removeInFlightCmdBuffer(dev_data, cb);
        }

        auto pFence = getFenceNode(dev_data, submission.fence);
        if (pFence) {
            pFence->state = FENCE_RETIRED;
        }

        pQueue->submissions.pop_front();
        pQueue->seq++;
    }

    // Roll other queues forward to the highest seq we saw a wait for
    for (auto qs : otherQueueSeqs) {
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, qs.first), qs.second);
    }
}
Chris Forbesd73299b2016-06-10 15:25:45 +12004642
4643
// Submit a fence to a queue, delimiting previous fences and previous untracked
// work by it.
static void SubmitFence(QUEUE_STATE *pQueue, FENCE_NODE *pFence, uint64_t submitCount) {
    pFence->state = FENCE_INFLIGHT;
    pFence->signaler.first = pQueue->queue;
    pFence->signaler.second = pQueue->seq + pQueue->submissions.size() + submitCount;
}

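// Worked example (illustrative): if pQueue->seq is 10, three submissions are already in flight, and
// vkQueueSubmit() is called with submitCount == 2, the fence's signaler is recorded as
// (queue, 10 + 3 + 2 == 15); RetireWorkOnQueue() will consider the fence retired once the queue's
// sequence number reaches 15, i.e. after the last of the new submissions completes.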
static bool validateCommandBufferSimultaneousUse(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(pCB->commandBuffer) &&
        !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             0, __LINE__, VALIDATION_ERROR_00133, "DS",
                             "Command Buffer 0x%p is already in use and is not marked for simultaneous use. %s", pCB->commandBuffer,
                             validation_error_map[VALIDATION_ERROR_00133]);
    }
    return skip_call;
}

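// Illustrative app-side sequence (not part of this layer) that would trigger VALIDATION_ERROR_00133
// above -- resubmitting a command buffer that is still in flight without
// VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT:
//
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);  // cb now in flight
//     vkQueueSubmit(queue, 1, &submit_info, VK_NULL_HANDLE);  // error: cb in use, no SIMULTANEOUS_USE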
static bool validateCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const char *call_source) {
    bool skip = false;
    if (dev_data->instance_data->disabled.command_buffer_state)
        return skip;
    // Validate ONE_TIME_SUBMIT_BIT CB is not being submitted more than once
    if ((pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT) && (pCB->submitCount > 1)) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_COMMAND_BUFFER_SINGLE_SUBMIT_VIOLATION, "DS",
                        "Command buffer 0x%p was begun with VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT "
                        "set, but has been submitted 0x%" PRIxLEAST64 " times.",
                        pCB->commandBuffer, pCB->submitCount);
    }
    // Validate that cmd buffers have been updated
    if (CB_RECORDED != pCB->state) {
        if (CB_INVALID == pCB->state) {
            // Inform app of reason CB invalid
            for (auto obj : pCB->broken_bindings) {
                const char *type_str = object_type_to_string(obj.type);
                // Descriptor sets are a special case that can be either destroyed or updated to invalidate a CB
                const char *cause_str =
                    (obj.type == VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT) ? "destroyed or updated" : "destroyed";

                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(pCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
                            "You are submitting command buffer 0x%p that is invalid because bound %s 0x%" PRIxLEAST64 " was %s.",
                            pCB->commandBuffer, type_str, obj.handle, cause_str);
            }
        } else { // Flag error for using CB w/o vkEndCommandBuffer() called
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)(pCB->commandBuffer), __LINE__, DRAWSTATE_NO_END_COMMAND_BUFFER, "DS",
                            "You must call vkEndCommandBuffer() on command buffer 0x%p before this call to %s!", pCB->commandBuffer,
                            call_source);
        }
    }
    return skip;
}

// Validate that queueFamilyIndices of primary command buffers match this queue
// Secondary command buffers were previously validated in vkCmdExecuteCommands().
static bool validateQueueFamilyIndices(layer_data *dev_data, GLOBAL_CB_NODE *pCB, VkQueue queue) {
    bool skip_call = false;
    auto pPool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
    auto queue_state = getQueueState(dev_data, queue);

    if (pPool && queue_state && (pPool->queueFamilyIndex != queue_state->queueFamilyIndex)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             reinterpret_cast<uint64_t>(pCB->commandBuffer), __LINE__, VALIDATION_ERROR_00139, "DS",
                             "vkQueueSubmit: Primary command buffer 0x%p created in queue family %d is being submitted on queue "
                             "0x%p from queue family %d. %s",
                             pCB->commandBuffer, pPool->queueFamilyIndex, queue, queue_state->queueFamilyIndex,
                             validation_error_map[VALIDATION_ERROR_00139]);
    }

    return skip_call;
}

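// Validate a primary command buffer at submit time: checks simultaneous-use rules, increments in-use
// counts on bound resources (for this CB and any recorded secondaries), flags secondaries that have
// since been re-bound to a different primary without SIMULTANEOUS_USE, and finally verifies the
// recording state via validateCommandBufferState().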
static bool validatePrimaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    // Track in-use for resources off of primary and any secondary CBs
    bool skip_call = false;

    // If USAGE_SIMULTANEOUS_USE_BIT not set then CB cannot already be executing on device
    skip_call |= validateCommandBufferSimultaneousUse(dev_data, pCB);

    skip_call |= validateAndIncrementResources(dev_data, pCB);

    if (!pCB->secondaryCommandBuffers.empty()) {
        for (auto secondaryCmdBuffer : pCB->secondaryCommandBuffers) {
            GLOBAL_CB_NODE *pSubCB = getCBNode(dev_data, secondaryCmdBuffer);
            skip_call |= validateAndIncrementResources(dev_data, pSubCB);
            if ((pSubCB->primaryCommandBuffer != pCB->commandBuffer) &&
                !(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
                log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                    __LINE__, VALIDATION_ERROR_00135, "DS",
                    "Command buffer 0x%p was submitted with secondary buffer 0x%p but that buffer has subsequently been bound to "
                    "primary command buffer 0x%p and it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set. %s",
                    pCB->commandBuffer, secondaryCmdBuffer, pSubCB->primaryCommandBuffer,
                    validation_error_map[VALIDATION_ERROR_00135]);
            }
        }
    }

    skip_call |= validateCommandBufferState(dev_data, pCB, "vkQueueSubmit()");

    return skip_call;
}

static bool ValidateFenceForSubmit(layer_data *dev_data, FENCE_NODE *pFence) {
    bool skip_call = false;

    if (pFence) {
        if (pFence->state == FENCE_INFLIGHT) {
            // TODO: opportunities for VALIDATION_ERROR_00127, VALIDATION_ERROR_01647, VALIDATION_ERROR_01953
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 (uint64_t)(pFence->fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS",
                                 "Fence 0x%" PRIx64 " is already in use by another submission.", (uint64_t)(pFence->fence));
        } else if (pFence->state == FENCE_RETIRED) {
            // TODO: opportunities for VALIDATION_ERROR_00126, VALIDATION_ERROR_01646, VALIDATION_ERROR_01953
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                        reinterpret_cast<uint64_t &>(pFence->fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                        "Fence 0x%" PRIxLEAST64 " submitted in SIGNALED state. Fences must be reset before being submitted",
                        reinterpret_cast<uint64_t &>(pFence->fence));
        }
    }

    return skip_call;
}

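// vkQueueSubmit intercept. Flow: validate the fence, mark it in-flight, then for each VkSubmitInfo
// resolve wait/signal semaphores against the per-queue sequence model, validate every primary command
// buffer (and run its deferred submit-time validate/update callbacks), and record the whole batch as a
// submission on the queue so RetireWorkOnQueue() can later unwind it. Note that the global lock is
// dropped before calling down the dispatch chain.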
VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount, const VkSubmitInfo *pSubmits, VkFence fence) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);

    auto pQueue = getQueueState(dev_data, queue);
    auto pFence = getFenceNode(dev_data, fence);
    skip_call |= ValidateFenceForSubmit(dev_data, pFence);

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // Mark the fence in-use.
    if (pFence) {
        SubmitFence(pQueue, pFence, std::max(1u, submitCount));
    }

    // Now verify each individual submit
    for (uint32_t submit_idx = 0; submit_idx < submitCount; submit_idx++) {
        const VkSubmitInfo *submit = &pSubmits[submit_idx];
        vector<SEMAPHORE_WAIT> semaphore_waits;
        vector<VkSemaphore> semaphore_signals;
        for (uint32_t i = 0; i < submit->waitSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pWaitSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
                        semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
                        pSemaphore->in_use.fetch_add(1);
                    }
                    pSemaphore->signaler.first = VK_NULL_HANDLE;
                    pSemaphore->signaled = false;
                } else {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
                                reinterpret_cast<const uint64_t &>(semaphore));
                }
            }
        }
        for (uint32_t i = 0; i < submit->signalSemaphoreCount; ++i) {
            VkSemaphore semaphore = submit->pSignalSemaphores[i];
            auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
            if (pSemaphore) {
                if (pSemaphore->signaled) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
                                reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
                                "Queue 0x%p is signaling semaphore 0x%" PRIx64
                                " that has already been signaled but not waited on by queue 0x%" PRIx64 ".",
                                queue, reinterpret_cast<const uint64_t &>(semaphore),
                                reinterpret_cast<uint64_t &>(pSemaphore->signaler.first));
                } else {
                    pSemaphore->signaler.first = queue;
                    pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
                    pSemaphore->signaled = true;
                    pSemaphore->in_use.fetch_add(1);
                    semaphore_signals.push_back(semaphore);
                }
            }
        }

        std::vector<VkCommandBuffer> cbs;

        for (uint32_t i = 0; i < submit->commandBufferCount; i++) {
            auto cb_node = getCBNode(dev_data, submit->pCommandBuffers[i]);
            skip_call |= ValidateCmdBufImageLayouts(dev_data, cb_node);
            if (cb_node) {
                cbs.push_back(submit->pCommandBuffers[i]);
                for (auto secondaryCmdBuffer : cb_node->secondaryCommandBuffers) {
                    cbs.push_back(secondaryCmdBuffer);
                }

                cb_node->submitCount++; // increment submit count
                skip_call |= validatePrimaryCommandBufferState(dev_data, cb_node);
                skip_call |= validateQueueFamilyIndices(dev_data, cb_node, queue);
                // Potential early exit here as bad object state may crash in delayed function calls
                if (skip_call)
                    return result;
                // Call submit-time functions to validate/update state
                for (auto &function : cb_node->validate_functions) {
                    skip_call |= function();
                }
                for (auto &function : cb_node->eventUpdates) {
                    skip_call |= function(queue);
                }
                for (auto &function : cb_node->queryUpdates) {
                    skip_call |= function(queue);
                }
            }
        }

        pQueue->submissions.emplace_back(cbs, semaphore_waits, semaphore_signals,
                                         submit_idx == submitCount - 1 ? fence : VK_NULL_HANDLE);
    }

    if (pFence && !submitCount) {
        // If no submissions, but just dropping a fence on the end of the queue,
        // record an empty submission with just the fence, so we can determine
        // its completion.
        pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(), std::vector<SEMAPHORE_WAIT>(),
                                         std::vector<VkSemaphore>(), fence);
    }

    lock.unlock();
    if (!skip_call)
        result = dev_data->dispatch_table.QueueSubmit(queue, submitCount, pSubmits, fence);

    return result;
}

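// The AllocateMemory entry point below follows this layer's PreCallValidate / PostCallRecord split:
// validate under the global lock, release the lock around the down-chain dispatch, then re-acquire it
// to record state only if the driver call succeeded. Most entry points in this file follow the same
// pattern.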
static bool PreCallValidateAllocateMemory(layer_data *dev_data) {
    bool skip = false;
    if (dev_data->memObjMap.size() >= dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
                        reinterpret_cast<const uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_00611, "MEM",
                        "Number of currently valid memory objects is not less than the maximum allowed (%u). %s",
                        dev_data->phys_dev_properties.properties.limits.maxMemoryAllocationCount,
                        validation_error_map[VALIDATION_ERROR_00611]);
    }
    return skip;
}

static void PostCallRecordAllocateMemory(layer_data *dev_data, const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) {
    add_mem_obj_info(dev_data, dev_data->device, *pMemory, pAllocateInfo);
    return;
}

VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateAllocateMemory(dev_data);
    if (!skip) {
        lock.unlock();
        result = dev_data->dispatch_table.AllocateMemory(device, pAllocateInfo, pAllocator, pMemory);
        lock.lock();
        if (VK_SUCCESS == result) {
            PostCallRecordAllocateMemory(dev_data, pAllocateInfo, pMemory);
        }
    }
    return result;
}

// For the given object node, if it is in use, flag a validation error and return the callback result; else return false
bool ValidateObjectNotInUse(const layer_data *dev_data, BASE_NODE *obj_node, VK_OBJECT obj_struct,
                            UNIQUE_VALIDATION_ERROR_CODE error_code) {
    if (dev_data->instance_data->disabled.object_in_use)
        return false;
    bool skip = false;
    if (obj_node->in_use.load()) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_struct.type, obj_struct.handle, __LINE__,
                        error_code, "DS", "Cannot delete %s 0x%" PRIx64 " that is currently in use by a command buffer. %s",
                        object_type_to_string(obj_struct.type), obj_struct.handle, validation_error_map[error_code]);
    }
    return skip;
}

static bool PreCallValidateFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO **mem_info, VK_OBJECT *obj_struct) {
    *mem_info = getMemObjInfo(dev_data, mem);
    *obj_struct = {reinterpret_cast<uint64_t &>(mem), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT};
    if (dev_data->instance_data->disabled.free_memory)
        return false;
    bool skip = false;
    if (*mem_info) {
        skip |= ValidateObjectNotInUse(dev_data, *mem_info, *obj_struct, VALIDATION_ERROR_00620);
    }
    return skip;
}

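// Record-side cleanup for vkFreeMemory: report (at INFORMATION level) any images or buffers still
// bound to the allocation and mark them unbound, invalidate command buffers that reference the
// allocation, then drop it from memObjMap.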
static void PostCallRecordFreeMemory(layer_data *dev_data, VkDeviceMemory mem, DEVICE_MEM_INFO *mem_info, VK_OBJECT obj_struct) {
    // Clear mem binding for any bound objects
    for (auto obj : mem_info->obj_bindings) {
        log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__, MEMTRACK_FREED_MEM_REF,
                "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64, obj.handle,
                (uint64_t)mem_info->mem);
        switch (obj.type) {
        case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
            auto image_state = getImageState(dev_data, reinterpret_cast<VkImage &>(obj.handle));
            assert(image_state); // Any destroyed images should already be removed from bindings
            image_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
            auto buffer_state = getBufferState(dev_data, reinterpret_cast<VkBuffer &>(obj.handle));
            assert(buffer_state); // Any destroyed buffers should already be removed from bindings
            buffer_state->binding.mem = MEMORY_UNBOUND;
            break;
        }
        default:
            // Should only have buffer or image objects bound to memory
            assert(0);
        }
    }
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, mem_info->cb_bindings, obj_struct);
    dev_data->memObjMap.erase(mem);
}

VKAPI_ATTR void VKAPI_CALL FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DEVICE_MEM_INFO *mem_info = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateFreeMemory(dev_data, mem, &mem_info, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.FreeMemory(device, mem, pAllocator);
        lock.lock();
        PostCallRecordFreeMemory(dev_data, mem, mem_info, obj_struct);
    }
}

// Validate that the given map-memory range is valid. This means that the memory should not already be mapped,
// and that the size of the map range should be:
// 1. Not zero
// 2. Within the size of the memory allocation
static bool ValidateMapMemRange(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    bool skip_call = false;

    if (size == 0) {
        skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                            (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                            "VkMapMemory: Attempting to map memory range of size zero");
    }

    auto mem_element = my_data->memObjMap.find(mem);
    if (mem_element != my_data->memObjMap.end()) {
        auto mem_info = mem_element->second.get();
        // It is an application error to call VkMapMemory on an object that is already mapped
        if (mem_info->mem_range.size != 0) {
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP, "MEM",
                                "VkMapMemory: Attempting to map memory on an already-mapped object 0x%" PRIxLEAST64, (uint64_t)mem);
        }

        // Validate that offset + size is within object's allocationSize
        if (size == VK_WHOLE_SIZE) {
            if (offset >= mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_INVALID_MAP,
                                    "MEM", "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64
                                           " with size of VK_WHOLE_SIZE oversteps total array size 0x%" PRIx64,
                                    offset, mem_info->alloc_info.allocationSize, mem_info->alloc_info.allocationSize);
            }
        } else {
            if ((offset + size) > mem_info->alloc_info.allocationSize) {
                skip_call = log_msg(
                    my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    (uint64_t)mem, __LINE__, VALIDATION_ERROR_00628, "MEM",
                    "Mapping Memory from 0x%" PRIx64 " to 0x%" PRIx64 " oversteps total array size 0x%" PRIx64 ". %s", offset,
                    size + offset, mem_info->alloc_info.allocationSize, validation_error_map[VALIDATION_ERROR_00628]);
            }
        }
    }
    return skip_call;
}

static void storeMemRanges(layer_data *my_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size) {
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        mem_info->mem_range.offset = offset;
        mem_info->mem_range.size = size;
    }
}

static bool deleteMemRanges(layer_data *my_data, VkDeviceMemory mem) {
    bool skip_call = false;
    auto mem_info = getMemObjInfo(my_data, mem);
    if (mem_info) {
        if (!mem_info->mem_range.size) {
            // Valid Usage: memory must currently be mapped
            skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                                (uint64_t)mem, __LINE__, VALIDATION_ERROR_00649, "MEM",
                                "Unmapping Memory without memory being mapped: mem obj 0x%" PRIxLEAST64 ". %s", (uint64_t)mem,
                                validation_error_map[VALIDATION_ERROR_00649]);
        }
        mem_info->mem_range.size = 0;
        if (mem_info->shadow_copy) {
            free(mem_info->shadow_copy_base);
            mem_info->shadow_copy_base = 0;
            mem_info->shadow_copy = 0;
        }
    }
    return skip_call;
}

// Guard value for pad data
static char NoncoherentMemoryFillValue = 0xb;

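// Shadow-copy layout built below for non-coherent mappings (a sketch; exact offsets are rounded per
// minMemoryMapAlignment and the mapping's start_offset):
//
//   shadow_copy_base (raw malloc)
//      ... alignment slack ...
//   shadow_copy -> |-- front pad --|-- app data (size) --|-- rear pad --|
//                                  ^ *ppData handed back to the app
//
// The pads (and initially the data region) are filled with NoncoherentMemoryFillValue so later
// validation can detect writes that stray outside the mapped range.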
static void initializeAndTrackMemory(layer_data *dev_data, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size,
                                     void **ppData) {
    auto mem_info = getMemObjInfo(dev_data, mem);
    if (mem_info) {
        mem_info->p_driver_data = *ppData;
        uint32_t index = mem_info->alloc_info.memoryTypeIndex;
        if (dev_data->phys_dev_mem_props.memoryTypes[index].propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
            mem_info->shadow_copy = 0;
        } else {
            if (size == VK_WHOLE_SIZE) {
                size = mem_info->alloc_info.allocationSize - offset;
            }
            mem_info->shadow_pad_size = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;
            assert(vk_safe_modulo(mem_info->shadow_pad_size,
                                  dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment) == 0);
            // Ensure start of mapped region reflects hardware alignment constraints
            uint64_t map_alignment = dev_data->phys_dev_properties.properties.limits.minMemoryMapAlignment;

            // From spec: (ppData - offset) must be aligned to at least limits::minMemoryMapAlignment.
            uint64_t start_offset = offset % map_alignment;
            // Data passed to driver will be wrapped by a guardband of data to detect over- or under-writes.
            mem_info->shadow_copy_base =
                malloc(static_cast<size_t>(2 * mem_info->shadow_pad_size + size + map_alignment + start_offset));

            mem_info->shadow_copy =
                reinterpret_cast<char *>((reinterpret_cast<uintptr_t>(mem_info->shadow_copy_base) + map_alignment) &
                                         ~(map_alignment - 1)) +
                start_offset;
            assert(vk_safe_modulo(reinterpret_cast<uintptr_t>(mem_info->shadow_copy) + mem_info->shadow_pad_size - start_offset,
                                  map_alignment) == 0);

            memset(mem_info->shadow_copy, NoncoherentMemoryFillValue, static_cast<size_t>(2 * mem_info->shadow_pad_size + size));
            *ppData = static_cast<char *>(mem_info->shadow_copy) + mem_info->shadow_pad_size;
        }
    }
}

// Verify that state for fence being waited on is appropriate. That is,
// a fence being waited on should not already be signaled and
// it should have been submitted on a queue or during acquire next image
static inline bool verifyWaitFenceState(layer_data *dev_data, VkFence fence, const char *apiCall) {
    bool skip_call = false;

    auto pFence = getFenceNode(dev_data, fence);
    if (pFence) {
        if (pFence->state == FENCE_UNSIGNALED) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                                 reinterpret_cast<uint64_t &>(fence), __LINE__, MEMTRACK_INVALID_FENCE_STATE, "MEM",
                                 "%s called for fence 0x%" PRIxLEAST64 " which has not been submitted on a Queue or during "
                                 "acquire next image.",
                                 apiCall, reinterpret_cast<uint64_t &>(fence));
        }
    }
    return skip_call;
}

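// Retire a signaled fence. If the fence was signaled by a queue submission, its (queue, seq) signaler
// pair proves all prior work on that queue has completed, so the queue is rolled forward; otherwise
// (e.g. a WSI signal) the fence is simply marked FENCE_RETIRED.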
static void RetireFence(layer_data *dev_data, VkFence fence) {
    auto pFence = getFenceNode(dev_data, fence);
    if (pFence->signaler.first != VK_NULL_HANDLE) {
        // Fence signaller is a queue -- use this as proof that prior operations on that queue have completed.
        RetireWorkOnQueue(dev_data, getQueueState(dev_data, pFence->signaler.first), pFence->signaler.second);
    } else {
        // Fence signaller is the WSI. We're not tracking what the WSI op actually /was/ in CV yet, but we need to mark
        // the fence as retired.
        pFence->state = FENCE_RETIRED;
    }
}

static bool PreCallValidateWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences) {
    if (dev_data->instance_data->disabled.wait_for_fences)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < fence_count; i++) {
        skip |= verifyWaitFenceState(dev_data, fences[i], "vkWaitForFences");
        skip |= VerifyQueueStateToFence(dev_data, fences[i]);
    }
    return skip;
}

static void PostCallRecordWaitForFences(layer_data *dev_data, uint32_t fence_count, const VkFence *fences, VkBool32 wait_all) {
    // When we know that all fences are complete we can clean/remove their CBs
    if ((VK_TRUE == wait_all) || (1 == fence_count)) {
        for (uint32_t i = 0; i < fence_count; i++) {
            RetireFence(dev_data, fences[i]);
        }
    }
    // NOTE : The alternate case, where only some fences have completed, is not handled here. In that
    // case, for the app to know which fences completed, it will have to call vkGetFenceStatus(), at
    // which point we'll clean/remove their CBs if complete.
}

VKAPI_ATTR VkResult VKAPI_CALL WaitForFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences, VkBool32 waitAll,
                                             uint64_t timeout) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Verify fence status of submitted fences
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateWaitForFences(dev_data, fenceCount, pFences);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordWaitForFences(dev_data, fenceCount, pFences, waitAll);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateGetFenceStatus(layer_data *dev_data, VkFence fence) {
    if (dev_data->instance_data->disabled.get_fence_state)
        return false;
    return verifyWaitFenceState(dev_data, fence, "vkGetFenceStatus");
}

static void PostCallRecordGetFenceStatus(layer_data *dev_data, VkFence fence) { RetireFence(dev_data, fence); }

VKAPI_ATTR VkResult VKAPI_CALL GetFenceStatus(VkDevice device, VkFence fence) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetFenceStatus(dev_data, fence);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.GetFenceStatus(device, fence);
    if (result == VK_SUCCESS) {
        lock.lock();
        PostCallRecordGetFenceStatus(dev_data, fence);
        lock.unlock();
    }
    return result;
}

static void PostCallRecordGetDeviceQueue(layer_data *dev_data, uint32_t q_family_index, VkQueue queue) {
    // Add queue to tracking set only if it is new
    auto result = dev_data->queues.emplace(queue);
    if (result.second == true) {
        QUEUE_STATE *queue_state = &dev_data->queueMap[queue];
        queue_state->queue = queue;
        queue_state->queueFamilyIndex = q_family_index;
        queue_state->seq = 0;
    }
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);
    std::lock_guard<std::mutex> lock(global_lock);

    PostCallRecordGetDeviceQueue(dev_data, queueFamilyIndex, *pQueue);
}

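// vkQueueWaitIdle: on success, every submission currently recorded on the queue is known to be
// complete, so retirement advances the queue's sequence number past its entire submissions deque.
// DeviceWaitIdle below does the same for every queue on the device.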
static bool PreCallValidateQueueWaitIdle(layer_data *dev_data, VkQueue queue, QUEUE_STATE **queue_state) {
    *queue_state = getQueueState(dev_data, queue);
    if (dev_data->instance_data->disabled.queue_wait_idle)
        return false;
    return VerifyQueueStateToSeq(dev_data, *queue_state, (*queue_state)->seq + (*queue_state)->submissions.size());
}

static void PostCallRecordQueueWaitIdle(layer_data *dev_data, QUEUE_STATE *queue_state) {
    RetireWorkOnQueue(dev_data, queue_state, queue_state->seq + queue_state->submissions.size());
}

VKAPI_ATTR VkResult VKAPI_CALL QueueWaitIdle(VkQueue queue) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    QUEUE_STATE *queue_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateQueueWaitIdle(dev_data, queue, &queue_state);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.QueueWaitIdle(queue);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordQueueWaitIdle(dev_data, queue_state);
        lock.unlock();
    }
    return result;
}

static bool PreCallValidateDeviceWaitIdle(layer_data *dev_data) {
    if (dev_data->instance_data->disabled.device_wait_idle)
        return false;
    bool skip = false;
    for (auto &queue : dev_data->queueMap) {
        skip |= VerifyQueueStateToSeq(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
    return skip;
}

static void PostCallRecordDeviceWaitIdle(layer_data *dev_data) {
    for (auto &queue : dev_data->queueMap) {
        RetireWorkOnQueue(dev_data, &queue.second, queue.second.seq + queue.second.submissions.size());
    }
}

VKAPI_ATTR VkResult VKAPI_CALL DeviceWaitIdle(VkDevice device) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDeviceWaitIdle(dev_data);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.DeviceWaitIdle(device);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordDeviceWaitIdle(dev_data);
        lock.unlock();
    }
    return result;
}

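// The vkDestroy* entry points that follow share one shape: PreCallValidate* looks up the state node,
// builds a VK_OBJECT for error reporting, and (unless the check is disabled) refuses destruction of
// objects still in use; PostCallRecord* then erases the tracking state after the driver call.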
static bool PreCallValidateDestroyFence(layer_data *dev_data, VkFence fence, FENCE_NODE **fence_node, VK_OBJECT *obj_struct) {
    *fence_node = getFenceNode(dev_data, fence);
    *obj_struct = {reinterpret_cast<uint64_t &>(fence), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT};
    if (dev_data->instance_data->disabled.destroy_fence)
        return false;
    bool skip = false;
    if (*fence_node) {
        if ((*fence_node)->state == FENCE_INFLIGHT) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
                            (uint64_t)(fence), __LINE__, DRAWSTATE_INVALID_FENCE, "DS", "Fence 0x%" PRIx64 " is in use.",
                            (uint64_t)(fence));
        }
    }
    return skip;
}

static void PostCallRecordDestroyFence(layer_data *dev_data, VkFence fence) { dev_data->fenceMap.erase(fence); }

VKAPI_ATTR void VKAPI_CALL DestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    FENCE_NODE *fence_node = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyFence(dev_data, fence, &fence_node, &obj_struct);

    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyFence(device, fence, pAllocator);
        lock.lock();
        PostCallRecordDestroyFence(dev_data, fence);
    }
}

static bool PreCallValidateDestroySemaphore(layer_data *dev_data, VkSemaphore semaphore, SEMAPHORE_NODE **sema_node,
                                            VK_OBJECT *obj_struct) {
    *sema_node = getSemaphoreNode(dev_data, semaphore);
    *obj_struct = {reinterpret_cast<uint64_t &>(semaphore), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT};
    if (dev_data->instance_data->disabled.destroy_semaphore)
        return false;
    bool skip = false;
    if (*sema_node) {
        skip |= ValidateObjectNotInUse(dev_data, *sema_node, *obj_struct, VALIDATION_ERROR_00199);
    }
    return skip;
}

static void PostCallRecordDestroySemaphore(layer_data *dev_data, VkSemaphore sema) { dev_data->semaphoreMap.erase(sema); }

VKAPI_ATTR void VKAPI_CALL DestroySemaphore(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SEMAPHORE_NODE *sema_node;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySemaphore(dev_data, semaphore, &sema_node, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySemaphore(device, semaphore, pAllocator);
        lock.lock();
        PostCallRecordDestroySemaphore(dev_data, semaphore);
    }
}

static bool PreCallValidateDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE **event_state, VK_OBJECT *obj_struct) {
    *event_state = getEventNode(dev_data, event);
    *obj_struct = {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT};
    if (dev_data->instance_data->disabled.destroy_event)
        return false;
    bool skip = false;
    if (*event_state) {
        skip |= ValidateObjectNotInUse(dev_data, *event_state, *obj_struct, VALIDATION_ERROR_00213);
    }
    return skip;
}

static void PostCallRecordDestroyEvent(layer_data *dev_data, VkEvent event, EVENT_STATE *event_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, event_state->cb_bindings, obj_struct);
    dev_data->eventMap.erase(event);
}

VKAPI_ATTR void VKAPI_CALL DestroyEvent(VkDevice device, VkEvent event, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    EVENT_STATE *event_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyEvent(dev_data, event, &event_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyEvent(device, event, pAllocator);
        lock.lock();
        PostCallRecordDestroyEvent(dev_data, event, event_state, obj_struct);
    }
}

static bool PreCallValidateDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE **qp_state,
                                            VK_OBJECT *obj_struct) {
    *qp_state = getQueryPoolNode(dev_data, query_pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(query_pool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_query_pool)
        return false;
    bool skip = false;
    if (*qp_state) {
        skip |= ValidateObjectNotInUse(dev_data, *qp_state, *obj_struct, VALIDATION_ERROR_01012);
    }
    return skip;
}

static void PostCallRecordDestroyQueryPool(layer_data *dev_data, VkQueryPool query_pool, QUERY_POOL_NODE *qp_state,
                                           VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, qp_state->cb_bindings, obj_struct);
    dev_data->queryPoolMap.erase(query_pool);
}

VKAPI_ATTR void VKAPI_CALL DestroyQueryPool(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    QUERY_POOL_NODE *qp_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyQueryPool(dev_data, queryPool, &qp_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyQueryPool(device, queryPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyQueryPool(dev_data, queryPool, qp_state, obj_struct);
    }
}
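
// Validate a vkGetQueryPoolResults call. Queries recorded in currently in-flight command buffers are
// first gathered into *queries_in_flight; each requested query is then classified as
// available-and-in-flight (only legal if an event wait reset it), unavailable-and-in-flight (needs
// PARTIAL or WAIT flags plus a pending update that makes it available), unavailable, or
// uninitialized, and an error is logged for the illegal cases.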
static bool PreCallValidateGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                               uint32_t query_count, VkQueryResultFlags flags,
                                               unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    // Build a map recording which in-flight command buffers reference each query
    for (auto cmd_buffer : dev_data->globalInFlightCmdBuffers) {
        auto cb = getCBNode(dev_data, cmd_buffer);
        for (auto query_state_pair : cb->queryToStateMap) {
            (*queries_in_flight)[query_state_pair.first].push_back(cmd_buffer);
        }
    }
    if (dev_data->instance_data->disabled.get_query_pool_results)
        return false;
    bool skip = false;
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair == cb->waitedEventsBeforeQueryReset.end()) {
                        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                        "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is in flight.",
                                        (uint64_t)(query_pool), first_query + i);
                    }
                }
                // Unavailable and in flight
            } else if (qif_pair != queries_in_flight->end() && !query_state_pair->second) {
                // TODO : Can there be the same query in use by multiple command buffers in flight?
                bool make_available = false;
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    make_available |= cb->queryToStateMap[query];
                }
                if (!(((flags & VK_QUERY_RESULT_PARTIAL_BIT) || (flags & VK_QUERY_RESULT_WAIT_BIT)) && make_available)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0, __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                    "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                    (uint64_t)(query_pool), first_query + i);
                }
                // Unavailable
            } else if (!query_state_pair->second) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                                "Cannot get query results on queryPool 0x%" PRIx64 " with index %d which is unavailable.",
                                (uint64_t)(query_pool), first_query + i);
            }
            // Uninitialized
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, DRAWSTATE_INVALID_QUERY, "DS",
                            "Cannot get query results on queryPool 0x%" PRIx64
                            " with index %d as data has not been collected for this index.",
                            (uint64_t)(query_pool), first_query + i);
        }
    }
    return skip;
}
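
// Illustrative scenario for the checks above (app-side sketch, not layer code; queue, pool,
// fence and submit_info are hypothetical): reading back a query that a submitted command
// buffer is still producing, without VK_QUERY_RESULT_WAIT_BIT or VK_QUERY_RESULT_PARTIAL_BIT,
// lands in the "in flight" branches and is reported as DRAWSTATE_INVALID_QUERY:
//
//     vkQueueSubmit(queue, 1, &submit_info, fence);  // submitted CB contains Begin/EndQuery
//     uint64_t result;
//     vkGetQueryPoolResults(device, query_pool, 0, 1, sizeof(result), &result, sizeof(result), 0);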

static void PostCallRecordGetQueryPoolResults(layer_data *dev_data, VkQueryPool query_pool, uint32_t first_query,
                                              uint32_t query_count,
                                              unordered_map<QueryObject, vector<VkCommandBuffer>> *queries_in_flight) {
    for (uint32_t i = 0; i < query_count; ++i) {
        QueryObject query = {query_pool, first_query + i};
        auto qif_pair = queries_in_flight->find(query);
        auto query_state_pair = dev_data->queryToStateMap.find(query);
        if (query_state_pair != dev_data->queryToStateMap.end()) {
            // Available and in flight
            if (qif_pair != queries_in_flight->end() && query_state_pair->second) {
                for (auto cmd_buffer : qif_pair->second) {
                    auto cb = getCBNode(dev_data, cmd_buffer);
                    auto query_event_pair = cb->waitedEventsBeforeQueryReset.find(query);
                    if (query_event_pair != cb->waitedEventsBeforeQueryReset.end()) {
                        for (auto event : query_event_pair->second) {
                            dev_data->eventMap[event].needsSignaled = true;
                        }
                    }
                }
            }
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetQueryPoolResults(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                                                   size_t dataSize, void *pData, VkDeviceSize stride, VkQueryResultFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    unordered_map<QueryObject, vector<VkCommandBuffer>> queries_in_flight;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, flags, &queries_in_flight);
    lock.unlock();
    if (skip)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        dev_data->dispatch_table.GetQueryPoolResults(device, queryPool, firstQuery, queryCount, dataSize, pData, stride, flags);
    lock.lock();
    PostCallRecordGetQueryPoolResults(dev_data, queryPool, firstQuery, queryCount, &queries_in_flight);
    lock.unlock();
    return result;
}
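
// Pattern note (sketch of the convention the entry points in this file follow; Foo is a
// hypothetical placeholder, not a real entry point): validation and state recording happen
// under global_lock, while the call down the dispatch chain happens unlocked:
//
//     std::unique_lock<std::mutex> lock(global_lock);
//     bool skip = PreCallValidateFoo(dev_data, ...);    // read tracked state under the lock
//     lock.unlock();
//     if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;  // for VkResult-returning entry points
//     auto result = dev_data->dispatch_table.Foo(...);  // drive the next layer/ICD unlocked
//     lock.lock();
//     PostCallRecordFoo(dev_data, ...);                 // update tracked state under the lock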

static bool validateIdleBuffer(const layer_data *my_data, VkBuffer buffer) {
    bool skip_call = false;
    auto buffer_state = getBufferState(my_data, buffer);
    if (!buffer_state) {
        skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                             (uint64_t)(buffer), __LINE__, DRAWSTATE_DOUBLE_DESTROY, "DS",
                             "Cannot free buffer 0x%" PRIxLEAST64 " that has not been allocated.", (uint64_t)(buffer));
    } else {
        if (buffer_state->in_use.load()) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 (uint64_t)(buffer), __LINE__, VALIDATION_ERROR_00676, "DS",
                                 "Cannot free buffer 0x%" PRIxLEAST64 " that is in use by a command buffer. %s", (uint64_t)(buffer),
                                 validation_error_map[VALIDATION_ERROR_00676]);
        }
    }
    return skip_call;
}

// Return true if the given ranges intersect, else false
// Prerequisite: for both ranges, range->end - range->start > 0. A violation of that should already
// have been flagged as an error, so it is not re-checked here.
// When one range is linear and the other is non-linear, the comparison is padded out to
// bufferImageGranularity. In that padded case an alias triggers a validation warning and *skip_call
// may be set by the report callback, so callers that can hit the padded case should merge in the
// returned *skip_call value.
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, MEMORY_RANGE const *range2, bool *skip_call) {
    *skip_call = false;
    auto r1_start = range1->start;
    auto r1_end = range1->end;
    auto r2_start = range2->start;
    auto r2_end = range2->end;
    VkDeviceSize pad_align = 1;
    if (range1->linear != range2->linear) {
        pad_align = dev_data->phys_dev_properties.properties.limits.bufferImageGranularity;
    }
    if ((r1_end & ~(pad_align - 1)) < (r2_start & ~(pad_align - 1)))
        return false;
    if ((r1_start & ~(pad_align - 1)) > (r2_end & ~(pad_align - 1)))
        return false;

    if (range1->linear != range2->linear) {
        // In linear vs. non-linear case, warn of aliasing
        const char *r1_linear_str = range1->linear ? "Linear" : "Non-linear";
        const char *r1_type_str = range1->image ? "image" : "buffer";
        const char *r2_linear_str = range2->linear ? "linear" : "non-linear";
        const char *r2_type_str = range2->image ? "image" : "buffer";
        auto obj_type = range1->image ? VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT : VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT;
        *skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, obj_type, range1->handle, 0,
                              MEMTRACK_INVALID_ALIASING, "MEM", "%s %s 0x%" PRIx64 " is aliased with %s %s 0x%" PRIx64
                              " which may indicate a bug. For further info refer to the "
                              "Buffer-Image Granularity section of the Vulkan specification. "
                              "(https://www.khronos.org/registry/vulkan/specs/1.0-extensions/"
                              "xhtml/vkspec.html#resources-bufferimagegranularity)",
                              r1_linear_str, r1_type_str, range1->handle, r2_linear_str, r2_type_str, range2->handle);
    }
    // Ranges intersect
    return true;
}
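
// Worked example of the masking above (illustrative numbers): with
// bufferImageGranularity == 0x400, pad_align - 1 == 0x3FF, so both endpoints are rounded
// down to a 1 KiB page. A linear range ending at 0x3FF and a non-linear range starting at
// 0x400 then land on different pages and do not alias:
//     (0x3FF & ~0x3FF) == 0x000  <  (0x400 & ~0x3FF) == 0x400   -> no intersection
// whereas a linear range ending at 0x401 rounds down onto page 0x400 and is reported as a
// potential alias of that non-linear range.
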
// Simplified rangesIntersect that wraps the given offset and end in a MEMORY_RANGE and checks it against range1
static bool rangesIntersect(layer_data const *dev_data, MEMORY_RANGE const *range1, VkDeviceSize offset, VkDeviceSize end) {
    // Create a local MEMORY_RANGE struct to wrap offset/end
    MEMORY_RANGE range_wrap;
    // Synch linear with range1 to avoid padding and the potential validation-error case
    range_wrap.linear = range1->linear;
    range_wrap.start = offset;
    range_wrap.end = end;
    bool tmp_bool;
    return rangesIntersect(dev_data, range1, &range_wrap, &tmp_bool);
}
// For given mem_info, set all ranges valid that intersect the [offset, end] range
// TODO : For ranges where there is no alias, we may want to create new buffer ranges that are valid
static void SetMemRangesValid(layer_data const *dev_data, DEVICE_MEM_INFO *mem_info, VkDeviceSize offset, VkDeviceSize end) {
    bool tmp_bool = false;
    MEMORY_RANGE map_range = {};
    map_range.linear = true;
    map_range.start = offset;
    map_range.end = end;
    for (auto &handle_range_pair : mem_info->bound_ranges) {
        if (rangesIntersect(dev_data, &handle_range_pair.second, &map_range, &tmp_bool)) {
            // TODO : WARN here if tmp_bool true?
            handle_range_pair.second.valid = true;
        }
    }
}
// Object with given handle is being bound to memory with the given mem_info struct.
// Track the newly bound memory range with the given memoryOffset.
// Also scan any previous ranges, track aliased ranges with the new range, and flag an error if a
// linear and non-linear range incorrectly overlap.
// Return true if an error is flagged and the user callback returns "true", otherwise false.
// is_image indicates an image object, otherwise handle is for a buffer.
// is_linear indicates a buffer or linear image.
static bool InsertMemoryRange(layer_data const *dev_data, uint64_t handle, DEVICE_MEM_INFO *mem_info, VkDeviceSize memoryOffset,
                              VkMemoryRequirements memRequirements, bool is_image, bool is_linear) {
    bool skip_call = false;
    MEMORY_RANGE range;

    range.image = is_image;
    range.handle = handle;
    range.linear = is_linear;
    range.valid = mem_info->global_valid;
    range.memory = mem_info->mem;
    range.start = memoryOffset;
    range.size = memRequirements.size;
    range.end = memoryOffset + memRequirements.size - 1;
    range.aliases.clear();
    // Update memory aliasing
    // Save aliased ranges so we can copy them into the final map entry below. This can't be done inside the loop because we don't
    // yet have the final pointer; if we inserted into the map before the loop to get that pointer, we might enter the loop when
    // not needed and check the range against itself.
    std::unordered_set<MEMORY_RANGE *> tmp_alias_ranges;
    for (auto &obj_range_pair : mem_info->bound_ranges) {
        auto check_range = &obj_range_pair.second;
        bool intersection_error = false;
        if (rangesIntersect(dev_data, &range, check_range, &intersection_error)) {
            skip_call |= intersection_error;
            range.aliases.insert(check_range);
            tmp_alias_ranges.insert(check_range);
        }
    }
    mem_info->bound_ranges[handle] = std::move(range);
    for (auto tmp_range : tmp_alias_ranges) {
        tmp_range->aliases.insert(&mem_info->bound_ranges[handle]);
    }
    if (is_image)
        mem_info->bound_images.insert(handle);
    else
        mem_info->bound_buffers.insert(handle);

    return skip_call;
}
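
// Illustrative state after InsertMemoryRange() has run for two overlapping resources with
// hypothetical handles A and B bound to the same mem_info (sketch, not layer code):
//
//     mem_info->bound_ranges[A].aliases contains &mem_info->bound_ranges[B]
//     mem_info->bound_ranges[B].aliases contains &mem_info->bound_ranges[A]
//
// The linkage is symmetric, which is what lets RemoveMemoryRange() below unhook an erased
// range from every surviving alias before it disappears from bound_ranges.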

static bool InsertImageMemoryRange(layer_data const *dev_data, VkImage image, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                   VkMemoryRequirements mem_reqs, bool is_linear) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(image), mem_info, mem_offset, mem_reqs, true, is_linear);
}

static bool InsertBufferMemoryRange(layer_data const *dev_data, VkBuffer buffer, DEVICE_MEM_INFO *mem_info, VkDeviceSize mem_offset,
                                    VkMemoryRequirements mem_reqs) {
    return InsertMemoryRange(dev_data, reinterpret_cast<uint64_t &>(buffer), mem_info, mem_offset, mem_reqs, false, true);
}

// Remove the MEMORY_RANGE struct for the given handle from the bound_ranges of mem_info.
// is_image indicates whether the handle names an image or a buffer.
// This function also removes the handle from the appropriate bound_images/bound_buffers set
// and cleans up any aliases of the range being removed.
static void RemoveMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info, bool is_image) {
    auto erase_range = &mem_info->bound_ranges[handle];
    for (auto alias_range : erase_range->aliases) {
        alias_range->aliases.erase(erase_range);
    }
    erase_range->aliases.clear();
    mem_info->bound_ranges.erase(handle);
    if (is_image) {
        mem_info->bound_images.erase(handle);
    } else {
        mem_info->bound_buffers.erase(handle);
    }
}

static void RemoveBufferMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, false); }

static void RemoveImageMemoryRange(uint64_t handle, DEVICE_MEM_INFO *mem_info) { RemoveMemoryRange(handle, mem_info, true); }

static bool PreCallValidateDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE **buffer_state,
                                         VK_OBJECT *obj_struct) {
    *buffer_state = getBufferState(dev_data, buffer);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer)
        return false;
    bool skip = false;
    if (*buffer_state) {
        skip |= validateIdleBuffer(dev_data, buffer);
    }
    return skip;
}

static void PostCallRecordDestroyBuffer(layer_data *dev_data, VkBuffer buffer, BUFFER_STATE *buffer_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, buffer_state->cb_bindings, obj_struct);
    for (auto mem_binding : buffer_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveBufferMemoryRange(reinterpret_cast<uint64_t &>(buffer), mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, reinterpret_cast<uint64_t &>(buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT);
    dev_data->bufferMap.erase(buffer_state->buffer);
}

VKAPI_ATTR void VKAPI_CALL DestroyBuffer(VkDevice device, VkBuffer buffer,
                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    BUFFER_STATE *buffer_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyBuffer(dev_data, buffer, &buffer_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBuffer(device, buffer, pAllocator);
        lock.lock();
        PostCallRecordDestroyBuffer(dev_data, buffer, buffer_state, obj_struct);
    }
}

static bool PreCallValidateDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE **buffer_view_state,
                                             VK_OBJECT *obj_struct) {
    *buffer_view_state = getBufferViewState(dev_data, buffer_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(buffer_view), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_buffer_view)
        return false;
    bool skip = false;
    if (*buffer_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *buffer_view_state, *obj_struct, VALIDATION_ERROR_00701);
    }
    return skip;
}

static void PostCallRecordDestroyBufferView(layer_data *dev_data, VkBufferView buffer_view, BUFFER_VIEW_STATE *buffer_view_state,
                                            VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, buffer_view_state->cb_bindings, obj_struct);
    dev_data->bufferViewMap.erase(buffer_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyBufferView(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    BUFFER_VIEW_STATE *buffer_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate state before calling down chain, update common data if we'll be calling down chain
    bool skip = PreCallValidateDestroyBufferView(dev_data, bufferView, &buffer_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyBufferView(device, bufferView, pAllocator);
        lock.lock();
        PostCallRecordDestroyBufferView(dev_data, bufferView, buffer_view_state, obj_struct);
    }
}

static bool PreCallValidateDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE **image_state, VK_OBJECT *obj_struct) {
    *image_state = getImageState(dev_data, image);
    *obj_struct = {reinterpret_cast<uint64_t &>(image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT};
    if (dev_data->instance_data->disabled.destroy_image)
        return false;
    bool skip = false;
    if (*image_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_state, *obj_struct, VALIDATION_ERROR_00743);
    }
    return skip;
}

static void PostCallRecordDestroyImage(layer_data *dev_data, VkImage image, IMAGE_STATE *image_state, VK_OBJECT obj_struct) {
    invalidateCommandBuffers(dev_data, image_state->cb_bindings, obj_struct);
    // Clean up memory mapping, bindings and range references for image
    for (auto mem_binding : image_state->GetBoundMemory()) {
        auto mem_info = getMemObjInfo(dev_data, mem_binding);
        if (mem_info) {
            RemoveImageMemoryRange(obj_struct.handle, mem_info);
        }
    }
    ClearMemoryObjectBindings(dev_data, obj_struct.handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT);
    // Remove image from imageMap
    dev_data->imageMap.erase(image);

    const auto &sub_entry = dev_data->imageSubresourceMap.find(image);
    if (sub_entry != dev_data->imageSubresourceMap.end()) {
        for (const auto &pair : sub_entry->second) {
            dev_data->imageLayoutMap.erase(pair);
        }
        dev_data->imageSubresourceMap.erase(sub_entry);
    }
}

VKAPI_ATTR void VKAPI_CALL DestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    IMAGE_STATE *image_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImage(dev_data, image, &image_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImage(device, image, pAllocator);
        lock.lock();
        PostCallRecordDestroyImage(dev_data, image, image_state, obj_struct);
    }
}

static bool ValidateMemoryTypes(const layer_data *dev_data, const DEVICE_MEM_INFO *mem_info, const uint32_t memory_type_bits,
                                const char *funcName, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;
    if (((1 << mem_info->alloc_info.memoryTypeIndex) & memory_type_bits) == 0) {
        skip_call =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), __LINE__, msgCode, "MT",
                    "%s(): MemoryRequirements->memoryTypeBits (0x%X) for this object type are not compatible with the memory "
                    "type (0x%X) of this memory object 0x%" PRIx64 ". %s",
                    funcName, memory_type_bits, mem_info->alloc_info.memoryTypeIndex,
                    reinterpret_cast<const uint64_t &>(mem_info->mem), validation_error_map[msgCode]);
    }
    return skip_call;
}
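
// Worked example of the bit test above (illustrative numbers): if
// VkMemoryRequirements::memoryTypeBits came back as 0x6 (memory types 1 and 2 allowed) and
// the allocation used memoryTypeIndex 0, then
//     ((1 << 0) & 0x6) == 0
// and the mismatch is reported; memoryTypeIndex 1 or 2 would pass the check.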

VKAPI_ATTR VkResult VKAPI_CALL
BindBufferMemory(VkDevice device, VkBuffer buffer, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    // Track objects tied to memory
    uint64_t buffer_handle = reinterpret_cast<uint64_t &>(buffer);
    bool skip_call = SetMemBinding(dev_data, mem, buffer_handle, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        if (!buffer_state->memory_requirements_checked) {
            // There's no explicit requirement in the spec to call vkGetBufferMemoryRequirements() prior to calling
            // BindBufferMemory, but it's implied in that the memory being bound must conform to the VkMemoryRequirements
            // returned by vkGetBufferMemoryRequirements()
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                 buffer_handle, __LINE__, DRAWSTATE_INVALID_BUFFER, "DS",
                                 "vkBindBufferMemory(): Binding memory to buffer 0x%" PRIxLEAST64
                                 " but vkGetBufferMemoryRequirements() has not been called on that buffer.",
                                 buffer_handle);
            // Make the call for them so we can verify the state
            lock.unlock();
            dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, &buffer_state->requirements);
            lock.lock();
        }
        buffer_state->binding.mem = mem;
        buffer_state->binding.offset = memoryOffset;
        buffer_state->binding.size = buffer_state->requirements.size;

        // Track and validate bound memory range information
        auto mem_info = getMemObjInfo(dev_data, mem);
        if (mem_info) {
            skip_call |= InsertBufferMemoryRange(dev_data, buffer, mem_info, memoryOffset, buffer_state->requirements);
            skip_call |= ValidateMemoryTypes(dev_data, mem_info, buffer_state->requirements.memoryTypeBits, "vkBindBufferMemory()",
                                             VALIDATION_ERROR_00797);
        }

        // Validate memory requirements alignment
        if (vk_safe_modulo(memoryOffset, buffer_state->requirements.alignment) != 0) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_02174, "DS",
                                 "vkBindBufferMemory(): memoryOffset is 0x%" PRIxLEAST64 " but must be an integer multiple of the "
                                 "VkMemoryRequirements::alignment value 0x%" PRIxLEAST64
                                 ", returned from a call to vkGetBufferMemoryRequirements with buffer. %s",
                                 memoryOffset, buffer_state->requirements.alignment, validation_error_map[VALIDATION_ERROR_02174]);
        }

        // Validate device limits alignments
        static const VkBufferUsageFlagBits usage_list[3] = {
            static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
            VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT};
        static const char *memory_type[3] = {"texel", "uniform", "storage"};
        static const char *offset_name[3] = {"minTexelBufferOffsetAlignment", "minUniformBufferOffsetAlignment",
                                             "minStorageBufferOffsetAlignment"};
        static const UNIQUE_VALIDATION_ERROR_CODE msgCode[3] = {VALIDATION_ERROR_00794, VALIDATION_ERROR_00795,
                                                                VALIDATION_ERROR_00796};

        // These limits come from the device, so unlike the static tables above they are looked up on each call
        const VkDeviceSize offset_requirement[3] = {
            dev_data->phys_dev_properties.properties.limits.minTexelBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment};
        VkBufferUsageFlags usage = dev_data->bufferMap[buffer].get()->createInfo.usage;

        for (int i = 0; i < 3; i++) {
            if (usage & usage_list[i]) {
                if (vk_safe_modulo(memoryOffset, offset_requirement[i]) != 0) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, msgCode[i], "DS",
                                         "vkBindBufferMemory(): %s memoryOffset is 0x%" PRIxLEAST64 " but must be a multiple of "
                                         "device limit %s 0x%" PRIxLEAST64 ". %s",
                                         memory_type[i], memoryOffset, offset_name[i], offset_requirement[i],
                                         validation_error_map[msgCode[i]]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call) {
        result = dev_data->dispatch_table.BindBufferMemory(device, buffer, mem, memoryOffset);
    }
    return result;
}
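
// Illustrative app-side sketch of the binding order this entry point validates (not layer
// code; device, buffer, memory and desired_offset are assumed to already exist):
//
//     VkMemoryRequirements reqs;
//     vkGetBufferMemoryRequirements(device, buffer, &reqs);  // avoids the warning above
//     // alignment is a power of two, so round the offset up with a mask:
//     VkDeviceSize offset = (desired_offset + reqs.alignment - 1) & ~(reqs.alignment - 1);
//     vkBindBufferMemory(device, buffer, memory, offset);    // offset now satisfies alignment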

VKAPI_ATTR void VKAPI_CALL
GetBufferMemoryRequirements(VkDevice device, VkBuffer buffer, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetBufferMemoryRequirements(device, buffer, pMemoryRequirements);
    auto buffer_state = getBufferState(dev_data, buffer);
    if (buffer_state) {
        buffer_state->requirements = *pMemoryRequirements;
        buffer_state->memory_requirements_checked = true;
    }
}

VKAPI_ATTR void VKAPI_CALL
GetImageMemoryRequirements(VkDevice device, VkImage image, VkMemoryRequirements *pMemoryRequirements) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.GetImageMemoryRequirements(device, image, pMemoryRequirements);
    auto image_state = getImageState(dev_data, image);
    if (image_state) {
        image_state->requirements = *pMemoryRequirements;
        image_state->memory_requirements_checked = true;
    }
}

static bool PreCallValidateDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE **image_view_state,
                                            VK_OBJECT *obj_struct) {
    *image_view_state = getImageViewState(dev_data, image_view);
    *obj_struct = {reinterpret_cast<uint64_t &>(image_view), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT};
    if (dev_data->instance_data->disabled.destroy_image_view)
        return false;
    bool skip = false;
    if (*image_view_state) {
        skip |= ValidateObjectNotInUse(dev_data, *image_view_state, *obj_struct, VALIDATION_ERROR_00776);
    }
    return skip;
}

static void PostCallRecordDestroyImageView(layer_data *dev_data, VkImageView image_view, IMAGE_VIEW_STATE *image_view_state,
                                           VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, image_view_state->cb_bindings, obj_struct);
    dev_data->imageViewMap.erase(image_view);
}

VKAPI_ATTR void VKAPI_CALL
DestroyImageView(VkDevice device, VkImageView imageView, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Common data objects used pre & post call
    IMAGE_VIEW_STATE *image_view_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyImageView(dev_data, imageView, &image_view_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyImageView(device, imageView, pAllocator);
        lock.lock();
        PostCallRecordDestroyImageView(dev_data, imageView, image_view_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyShaderModule(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks *pAllocator) {
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    my_data->shaderModuleMap.erase(shaderModule);
    lock.unlock();

    my_data->dispatch_table.DestroyShaderModule(device, shaderModule, pAllocator);
}

static bool PreCallValidateDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE **pipeline_state,
                                           VK_OBJECT *obj_struct) {
    *pipeline_state = getPipelineState(dev_data, pipeline);
    *obj_struct = {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT};
    if (dev_data->instance_data->disabled.destroy_pipeline)
        return false;
    bool skip = false;
    if (*pipeline_state) {
        skip |= ValidateObjectNotInUse(dev_data, *pipeline_state, *obj_struct, VALIDATION_ERROR_00555);
    }
    return skip;
}

static void PostCallRecordDestroyPipeline(layer_data *dev_data, VkPipeline pipeline, PIPELINE_STATE *pipeline_state,
                                          VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, pipeline_state->cb_bindings, obj_struct);
    dev_data->pipelineMap.erase(pipeline);
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipeline(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    PIPELINE_STATE *pipeline_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyPipeline(dev_data, pipeline, &pipeline_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyPipeline(device, pipeline, pAllocator);
        lock.lock();
        PostCallRecordDestroyPipeline(dev_data, pipeline, pipeline_state, obj_struct);
    }
}

VKAPI_ATTR void VKAPI_CALL
DestroyPipelineLayout(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    dev_data->pipelineLayoutMap.erase(pipelineLayout);
    lock.unlock();

    dev_data->dispatch_table.DestroyPipelineLayout(device, pipelineLayout, pAllocator);
}

static bool PreCallValidateDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE **sampler_state,
                                          VK_OBJECT *obj_struct) {
    *sampler_state = getSamplerState(dev_data, sampler);
    *obj_struct = {reinterpret_cast<uint64_t &>(sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT};
    if (dev_data->instance_data->disabled.destroy_sampler)
        return false;
    bool skip = false;
    if (*sampler_state) {
        skip |= ValidateObjectNotInUse(dev_data, *sampler_state, *obj_struct, VALIDATION_ERROR_00837);
    }
    return skip;
}

static void PostCallRecordDestroySampler(layer_data *dev_data, VkSampler sampler, SAMPLER_STATE *sampler_state,
                                         VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    if (sampler_state)
        invalidateCommandBuffers(dev_data, sampler_state->cb_bindings, obj_struct);
    dev_data->samplerMap.erase(sampler);
}

VKAPI_ATTR void VKAPI_CALL
DestroySampler(VkDevice device, VkSampler sampler, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    SAMPLER_STATE *sampler_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroySampler(dev_data, sampler, &sampler_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroySampler(device, sampler, pAllocator);
        lock.lock();
        PostCallRecordDestroySampler(dev_data, sampler, sampler_state, obj_struct);
    }
}

static void PostCallRecordDestroyDescriptorSetLayout(layer_data *dev_data, VkDescriptorSetLayout ds_layout) {
    dev_data->descriptorSetLayoutMap.erase(ds_layout);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorSetLayout(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    dev_data->dispatch_table.DestroyDescriptorSetLayout(device, descriptorSetLayout, pAllocator);
    std::unique_lock<std::mutex> lock(global_lock);
    PostCallRecordDestroyDescriptorSetLayout(dev_data, descriptorSetLayout);
}

static bool PreCallValidateDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool pool,
                                                 DESCRIPTOR_POOL_STATE **desc_pool_state, VK_OBJECT *obj_struct) {
    *desc_pool_state = getDescriptorPoolState(dev_data, pool);
    *obj_struct = {reinterpret_cast<uint64_t &>(pool), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT};
    if (dev_data->instance_data->disabled.destroy_descriptor_pool)
        return false;
    bool skip = false;
    if (*desc_pool_state) {
        skip |= ValidateObjectNotInUse(dev_data, *desc_pool_state, *obj_struct, VALIDATION_ERROR_00901);
    }
    return skip;
}

static void PostCallRecordDestroyDescriptorPool(layer_data *dev_data, VkDescriptorPool descriptorPool,
                                                DESCRIPTOR_POOL_STATE *desc_pool_state, VK_OBJECT obj_struct) {
    // Any bound cmd buffers are now invalid
    invalidateCommandBuffers(dev_data, desc_pool_state->cb_bindings, obj_struct);
    // Free sets that were in this pool
    for (auto ds : desc_pool_state->sets) {
        freeDescriptorSet(dev_data, ds);
    }
    dev_data->descriptorPoolMap.erase(descriptorPool);
}

VKAPI_ATTR void VKAPI_CALL
DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    DESCRIPTOR_POOL_STATE *desc_pool_state = nullptr;
    VK_OBJECT obj_struct;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyDescriptorPool(dev_data, descriptorPool, &desc_pool_state, &obj_struct);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyDescriptorPool(device, descriptorPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyDescriptorPool(dev_data, descriptorPool, desc_pool_state, obj_struct);
    }
}
// Verify that the command buffer in the given cb_node is not in the global in-flight set, and return the skip_call result.
// A secondary command buffer is only flagged if its primary is also in flight.
// This function is only valid at a point when the command buffer is being reset or freed.
static bool checkCommandBufferInFlight(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const char *action,
                                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (dev_data->globalInFlightCmdBuffers.count(cb_node->commandBuffer)) {
        // Primary CB, or secondary CB whose primary is also in-flight, is an error
        if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_SECONDARY) ||
            (dev_data->globalInFlightCmdBuffers.count(cb_node->primaryCommandBuffer))) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        reinterpret_cast<const uint64_t &>(cb_node->commandBuffer), __LINE__, error_code, "DS",
                        "Attempt to %s command buffer (0x%p) which is in use. %s", action, cb_node->commandBuffer,
                        validation_error_map[error_code]);
        }
    }
    return skip_call;
}

// Iterate over all cmdBuffers in given commandPool and verify that each is not in use
static bool checkCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool, const char *action,
                                        UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    for (auto cmd_buffer : pPool->commandBuffers) {
        if (dev_data->globalInFlightCmdBuffers.count(cmd_buffer)) {
            skip_call |= checkCommandBufferInFlight(dev_data, getCBNode(dev_data, cmd_buffer), action, error_code);
        }
    }
    return skip_call;
}
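
// Illustrative scenario (hypothetical handles, not layer code): a secondary command buffer
// 'sec' recorded into primary 'prim' via vkCmdExecuteCommands stays in the global in-flight
// set while 'prim' executes. During that window,
//     vkFreeCommandBuffers(device, pool, 1, &sec);
// is flagged by the check above; once 'prim' retires and leaves the in-flight set, freeing
// 'sec' passes.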

static void clearCommandBuffersInFlight(layer_data *dev_data, COMMAND_POOL_NODE *pPool) {
    for (auto cmd_buffer : pPool->commandBuffers) {
        dev_data->globalInFlightCmdBuffers.erase(cmd_buffer);
    }
}

VKAPI_ATTR void VKAPI_CALL
FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer *pCommandBuffers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;
    std::unique_lock<std::mutex> lock(global_lock);

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Verify that the command buffer is not in flight before freeing it
        if (cb_node) {
            skip_call |= checkCommandBufferInFlight(dev_data, cb_node, "free", VALIDATION_ERROR_00096);
        }
    }

    if (skip_call)
        return;

    auto pPool = getCommandPoolNode(dev_data, commandPool);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        auto cb_node = getCBNode(dev_data, pCommandBuffers[i]);
        // Delete CB information structure, and remove from commandBufferMap
        if (cb_node) {
            dev_data->globalInFlightCmdBuffers.erase(cb_node->commandBuffer);
            // reset prior to delete for data clean-up
            resetCB(dev_data, cb_node->commandBuffer);
            dev_data->commandBufferMap.erase(cb_node->commandBuffer);
            delete cb_node;
        }

        // Remove commandBuffer reference from commandPoolMap
        pPool->commandBuffers.remove(pCommandBuffers[i]);
    }
    lock.unlock();

    dev_data->dispatch_table.FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateCommandPool(VkDevice device, const VkCommandPoolCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator,
                                                 VkCommandPool *pCommandPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    VkResult result = dev_data->dispatch_table.CreateCommandPool(device, pCreateInfo, pAllocator, pCommandPool);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->commandPoolMap[*pCommandPool].createFlags = pCreateInfo->flags;
        dev_data->commandPoolMap[*pCommandPool].queueFamilyIndex = pCreateInfo->queueFamilyIndex;
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateQueryPool(VkDevice device, const VkQueryPoolCreateInfo *pCreateInfo,
                                               const VkAllocationCallbacks *pAllocator, VkQueryPool *pQueryPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    if (pCreateInfo && pCreateInfo->queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS) {
        if (!dev_data->enabled_features.pipelineStatisticsQuery) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT, 0,
                            __LINE__, VALIDATION_ERROR_01006, "DS",
                            "Query pool with type VK_QUERY_TYPE_PIPELINE_STATISTICS created on a device "
                            "with VkDeviceCreateInfo.pEnabledFeatures.pipelineStatisticsQuery == VK_FALSE. %s",
                            validation_error_map[VALIDATION_ERROR_01006]);
        }
    }

    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    if (!skip) {
        result = dev_data->dispatch_table.CreateQueryPool(device, pCreateInfo, pAllocator, pQueryPool);
    }
    if (result == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        QUERY_POOL_NODE *qp_node = &dev_data->queryPoolMap[*pQueryPool];
        qp_node->createInfo = *pCreateInfo;
    }
    return result;
}
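
// Illustrative app-side sketch of satisfying the feature check above (not layer code):
// pipelineStatisticsQuery must be requested at device creation before creating a
// VK_QUERY_TYPE_PIPELINE_STATISTICS pool:
//
//     VkPhysicalDeviceFeatures features = {};
//     features.pipelineStatisticsQuery = VK_TRUE;  // assumes the physical device supports it
//     VkDeviceCreateInfo device_ci = {};
//     device_ci.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
//     device_ci.pEnabledFeatures = &features;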

static bool PreCallValidateDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE **cp_state) {
    *cp_state = getCommandPoolNode(dev_data, pool);
    if (dev_data->instance_data->disabled.destroy_command_pool)
        return false;
    bool skip = false;
    if (*cp_state) {
        // Verify that command buffers in pool are complete (not in-flight)
        skip |= checkCommandBuffersInFlight(dev_data, *cp_state, "destroy command pool with", VALIDATION_ERROR_00077);
    }
    return skip;
}

static void PostCallRecordDestroyCommandPool(layer_data *dev_data, VkCommandPool pool, COMMAND_POOL_NODE *cp_state) {
    // Remove the command pool from commandPoolMap only after removing all of its command buffers from the commandBufferMap
    clearCommandBuffersInFlight(dev_data, cp_state);
    for (auto cb : cp_state->commandBuffers) {
        clear_cmd_buf_and_mem_references(dev_data, cb);
        auto cb_node = getCBNode(dev_data, cb);
        // Remove references to this cb_node prior to delete
        // TODO : Need better solution here, resetCB?
        for (auto obj : cb_node->object_bindings) {
            removeCommandBufferBinding(dev_data, &obj, cb_node);
        }
        for (auto framebuffer : cb_node->framebuffers) {
            auto fb_state = getFramebufferState(dev_data, framebuffer);
            if (fb_state)
                fb_state->cb_bindings.erase(cb_node);
        }
        dev_data->commandBufferMap.erase(cb); // Remove this command buffer
        delete cb_node;                       // delete CB info structure
    }
    dev_data->commandPoolMap.erase(pool);
}

// Destroy commandPool along with all of the commandBuffers allocated from that pool
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    COMMAND_POOL_NODE *cp_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateDestroyCommandPool(dev_data, commandPool, &cp_state);
    if (!skip) {
        lock.unlock();
        dev_data->dispatch_table.DestroyCommandPool(device, commandPool, pAllocator);
        lock.lock();
        PostCallRecordDestroyCommandPool(dev_data, commandPool, cp_state);
    }
}
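
// Cleanup-order note for the record step above (sketch of the data relationships): each
// GLOBAL_CB_NODE may be pointed at by its object bindings and by FRAMEBUFFER_STATE::cb_bindings,
// so those back-references are severed before the node is deleted and the pool entry erased;
// otherwise later validation could chase dangling pointers.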

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    std::unique_lock<std::mutex> lock(global_lock);
    auto pPool = getCommandPoolNode(dev_data, commandPool);
    skip_call |= checkCommandBuffersInFlight(dev_data, pPool, "reset command pool with", VALIDATION_ERROR_00072);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.ResetCommandPool(device, commandPool, flags);

    // Reset all of the CBs allocated from this pool
    if (VK_SUCCESS == result) {
        lock.lock();
        clearCommandBuffersInFlight(dev_data, pPool);
        for (auto cmdBuffer : pPool->commandBuffers) {
            resetCB(dev_data, cmdBuffer);
        }
        lock.unlock();
    }
    return result;
}
6312
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006313VKAPI_ATTR VkResult VKAPI_CALL ResetFences(VkDevice device, uint32_t fenceCount, const VkFence *pFences) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006314 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -06006315 bool skip_call = false;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006316 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006317 for (uint32_t i = 0; i < fenceCount; ++i) {
Chris Forbes0111ee62016-06-15 15:48:52 +12006318 auto pFence = getFenceNode(dev_data, pFences[i]);
Chris Forbes9c457b92016-06-21 18:10:47 +12006319 if (pFence && pFence->state == FENCE_INFLIGHT) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06006320 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT,
Dave Houlton197211a2016-12-23 15:26:29 -07006321 reinterpret_cast<const uint64_t &>(pFences[i]), __LINE__, VALIDATION_ERROR_00183, "DS",
6322 "Fence 0x%" PRIx64 " is in use. %s", reinterpret_cast<const uint64_t &>(pFences[i]),
6323 validation_error_map[VALIDATION_ERROR_00183]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006324 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006325 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006326 lock.unlock();
Chris Forbes9c457b92016-06-21 18:10:47 +12006327
Tobin Ehlisfe871282016-06-28 10:28:02 -06006328 if (skip_call)
Chris Forbes9c457b92016-06-21 18:10:47 +12006329 return VK_ERROR_VALIDATION_FAILED_EXT;
6330
Chris Forbesaaa9c282016-10-03 20:01:14 +13006331 VkResult result = dev_data->dispatch_table.ResetFences(device, fenceCount, pFences);
Chris Forbes9c457b92016-06-21 18:10:47 +12006332
6333 if (result == VK_SUCCESS) {
6334 lock.lock();
6335 for (uint32_t i = 0; i < fenceCount; ++i) {
6336 auto pFence = getFenceNode(dev_data, pFences[i]);
6337 if (pFence) {
6338 pFence->state = FENCE_UNSIGNALED;
Chris Forbes9c457b92016-06-21 18:10:47 +12006339 }
6340 }
6341 lock.unlock();
6342 }
6343
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006344 return result;
6345}
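// Illustrative app-side sketch (not part of this layer): the in-flight check
// above means a fence must retire before it is reset. A typical frame loop does
//
//     vkWaitForFences(device, 1, &fence, VK_TRUE, UINT64_MAX); // fence retires
//     vkResetFences(device, 1, &fence);                        // back to FENCE_UNSIGNALED
//
// Calling vkResetFences while the fence is still FENCE_INFLIGHT triggers the
// VALIDATION_ERROR_00183 message above.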
6346
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006347 // For the given cb_nodes, invalidate them and track the object causing the invalidation
Tobin Ehlisab294d82016-11-21 15:23:51 -07006348void invalidateCommandBuffers(const layer_data *dev_data, std::unordered_set<GLOBAL_CB_NODE *> const &cb_nodes, VK_OBJECT obj) {
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006349 for (auto cb_node : cb_nodes) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006350 if (cb_node->state == CB_RECORDING) {
6351 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Tobin Ehlisfda58e22016-11-21 15:15:52 -07006352 (uint64_t)(cb_node->commandBuffer), __LINE__, DRAWSTATE_INVALID_COMMAND_BUFFER, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -07006353 "Invalidating a command buffer that's currently being recorded: 0x%p.", cb_node->commandBuffer);
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006354 }
Tobin Ehlis2556f5b2016-06-24 17:22:16 -06006355 cb_node->state = CB_INVALID;
6356 cb_node->broken_bindings.push_back(obj);
6357 }
6358}
6359
Tobin Ehlis04c04272016-10-12 11:54:09 -06006360static bool PreCallValidateDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer,
6361 FRAMEBUFFER_STATE **framebuffer_state, VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06006362 *framebuffer_state = getFramebufferState(dev_data, framebuffer);
6363 *obj_struct = {reinterpret_cast<uint64_t &>(framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT};
Chris Forbesa13fe522016-10-13 15:34:59 +13006364 if (dev_data->instance_data->disabled.destroy_framebuffer)
Tobin Ehlis53332f02016-10-12 11:48:21 -06006365 return false;
6366 bool skip = false;
Tobin Ehlis53332f02016-10-12 11:48:21 -06006367 if (*framebuffer_state) {
Tobin Ehlis53332f02016-10-12 11:48:21 -06006368 skip |= ValidateObjectNotInUse(dev_data, *framebuffer_state, *obj_struct, VALIDATION_ERROR_00422);
6369 }
6370 return skip;
6371}
6372
Tobin Ehlis04c04272016-10-12 11:54:09 -06006373static void PostCallRecordDestroyFramebuffer(layer_data *dev_data, VkFramebuffer framebuffer, FRAMEBUFFER_STATE *framebuffer_state,
Tobin Ehlis53332f02016-10-12 11:48:21 -06006374 VK_OBJECT obj_struct) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006375 invalidateCommandBuffers(dev_data, framebuffer_state->cb_bindings, obj_struct);
Tobin Ehlis53332f02016-10-12 11:48:21 -06006376 dev_data->frameBufferMap.erase(framebuffer);
6377}
6378
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006379VKAPI_ATTR void VKAPI_CALL
6380DestroyFramebuffer(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006381 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis04c04272016-10-12 11:54:09 -06006382 FRAMEBUFFER_STATE *framebuffer_state = nullptr;
Tobin Ehlis53332f02016-10-12 11:48:21 -06006383 VK_OBJECT obj_struct;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006384 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis53332f02016-10-12 11:48:21 -06006385 bool skip = PreCallValidateDestroyFramebuffer(dev_data, framebuffer, &framebuffer_state, &obj_struct);
6386 if (!skip) {
6387 lock.unlock();
6388 dev_data->dispatch_table.DestroyFramebuffer(device, framebuffer, pAllocator);
6389 lock.lock();
6390 PostCallRecordDestroyFramebuffer(dev_data, framebuffer, framebuffer_state, obj_struct);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006391 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006392}
6393
Tobin Ehlis062595b2016-10-12 16:58:54 -06006394static bool PreCallValidateDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE **rp_state,
6395 VK_OBJECT *obj_struct) {
Tobin Ehlisd3190872016-10-25 21:47:46 -06006396 *rp_state = getRenderPassState(dev_data, render_pass);
6397 *obj_struct = {reinterpret_cast<uint64_t &>(render_pass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT};
Chris Forbesa13fe522016-10-13 15:34:59 +13006398 if (dev_data->instance_data->disabled.destroy_renderpass)
Tobin Ehlis062595b2016-10-12 16:58:54 -06006399 return false;
6400 bool skip = false;
Tobin Ehlis062595b2016-10-12 16:58:54 -06006401 if (*rp_state) {
Tobin Ehlis062595b2016-10-12 16:58:54 -06006402 skip |= ValidateObjectNotInUse(dev_data, *rp_state, *obj_struct, VALIDATION_ERROR_00393);
6403 }
6404 return skip;
6405}
6406
6407static void PostCallRecordDestroyRenderPass(layer_data *dev_data, VkRenderPass render_pass, RENDER_PASS_STATE *rp_state,
6408 VK_OBJECT obj_struct) {
Tobin Ehlisfe5731a2016-11-21 08:31:01 -07006409 invalidateCommandBuffers(dev_data, rp_state->cb_bindings, obj_struct);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006410 dev_data->renderPassMap.erase(render_pass);
6411}
6412
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006413VKAPI_ATTR void VKAPI_CALL
6414DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006415 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006416 RENDER_PASS_STATE *rp_state = nullptr;
6417 VK_OBJECT obj_struct;
Tobin Ehlis25e27ab2016-07-13 09:41:09 -06006418 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006419 bool skip = PreCallValidateDestroyRenderPass(dev_data, renderPass, &rp_state, &obj_struct);
Tobin Ehlisa5495242016-09-19 14:20:37 -06006420 if (!skip) {
Tobin Ehlisa5495242016-09-19 14:20:37 -06006421 lock.unlock();
Chris Forbesaaa9c282016-10-03 20:01:14 +13006422 dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);
Tobin Ehlis062595b2016-10-12 16:58:54 -06006423 lock.lock();
6424 PostCallRecordDestroyRenderPass(dev_data, renderPass, rp_state, obj_struct);
Tobin Ehlisa5495242016-09-19 14:20:37 -06006425 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006426}
6427
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006428VKAPI_ATTR VkResult VKAPI_CALL CreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
6429 const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006430 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Karl Schultza9ef1e52016-10-06 17:53:48 -06006431 // TODO: Add check for VALIDATION_ERROR_00658
6432 // TODO: Add check for VALIDATION_ERROR_00666
6433 // TODO: Add check for VALIDATION_ERROR_00667
6434 // TODO: Add check for VALIDATION_ERROR_00668
6435 // TODO: Add check for VALIDATION_ERROR_00669
Chris Forbesaaa9c282016-10-03 20:01:14 +13006436 VkResult result = dev_data->dispatch_table.CreateBuffer(device, pCreateInfo, pAllocator, pBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006437
6438 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006439 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006440 // TODO : This doesn't create a deep copy of pQueueFamilyIndices, so fix that if/when we want that data to be valid
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006441 dev_data->bufferMap.insert(std::make_pair(*pBuffer, unique_ptr<BUFFER_STATE>(new BUFFER_STATE(*pBuffer, pCreateInfo))));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006442 }
6443 return result;
6444}
6445
Mark Youngd339ba32016-05-30 13:28:35 -06006446static bool PreCallValidateCreateBufferView(layer_data *dev_data, const VkBufferViewCreateInfo *pCreateInfo) {
6447 bool skip_call = false;
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006448 BUFFER_STATE *buffer_state = getBufferState(dev_data, pCreateInfo->buffer);
Mark Youngd339ba32016-05-30 13:28:35 -06006449 // If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006450 if (buffer_state) {
Tobin Ehlise1995fc2016-12-22 12:45:09 -07006451 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCreateBufferView()", VALIDATION_ERROR_02522);
Tobin Ehlis18bca092016-06-29 09:07:52 -06006452 // In order to create a valid buffer view, the buffer must have been created with at least one of the
6453 // following flags: UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006454 skip_call |= ValidateBufferUsageFlags(
Tobin Ehlis4668dce2016-11-16 09:30:23 -07006455 dev_data, buffer_state, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006456 VALIDATION_ERROR_00694, "vkCreateBufferView()", "VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06006457 }
Mark Youngd339ba32016-05-30 13:28:35 -06006458 return skip_call;
6459}
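// Illustrative app-side sketch (assumed usage, not part of this layer): to
// satisfy the usage-flags check above, the underlying buffer needs a
// texel-buffer usage bit at creation time, e.g.:
//
//     VkBufferCreateInfo buf_ci = {};
//     buf_ci.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
//     buf_ci.size = 4096;
//     buf_ci.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT; // or STORAGE_TEXEL_BUFFER_BIT
//     buf_ci.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
//
// and the buffer must be bound to memory before vkCreateBufferView is called.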
6460
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006461VKAPI_ATTR VkResult VKAPI_CALL CreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
6462 const VkAllocationCallbacks *pAllocator, VkBufferView *pView) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006463 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Youngd339ba32016-05-30 13:28:35 -06006464 std::unique_lock<std::mutex> lock(global_lock);
6465 bool skip_call = PreCallValidateCreateBufferView(dev_data, pCreateInfo);
6466 lock.unlock();
6467 if (skip_call)
6468 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13006469 VkResult result = dev_data->dispatch_table.CreateBufferView(device, pCreateInfo, pAllocator, pView);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006470 if (VK_SUCCESS == result) {
Mark Youngd339ba32016-05-30 13:28:35 -06006471 lock.lock();
Tobin Ehlis8b872462016-09-14 08:12:08 -06006472 dev_data->bufferViewMap[*pView] = unique_ptr<BUFFER_VIEW_STATE>(new BUFFER_VIEW_STATE(*pView, pCreateInfo));
Mark Youngd339ba32016-05-30 13:28:35 -06006473 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006474 }
6475 return result;
6476}
6477
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006478VKAPI_ATTR VkResult VKAPI_CALL CreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
6479 const VkAllocationCallbacks *pAllocator, VkImage *pImage) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006480 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Lobodzinskif20f0942016-03-22 10:07:26 -06006481
Chris Forbesaaa9c282016-10-03 20:01:14 +13006482 VkResult result = dev_data->dispatch_table.CreateImage(device, pCreateInfo, pAllocator, pImage);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006483
6484 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006485 std::lock_guard<std::mutex> lock(global_lock);
Mark Lobodzinski42fe5f72017-01-11 11:36:16 -07006486 PostCallRecordCreateImage(dev_data->imageMap, dev_data->imageSubresourceMap, dev_data->imageLayoutMap, pCreateInfo, pImage);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006487 }
6488 return result;
6489}
6490
6491static void ResolveRemainingLevelsLayers(layer_data *dev_data, VkImageSubresourceRange *range, VkImage image) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006492 // Expects global_lock to be held by caller
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006493
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006494 auto image_state = getImageState(dev_data, image);
6495 if (image_state) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006496 // If the caller used the special values VK_REMAINING_MIP_LEVELS and VK_REMAINING_ARRAY_LAYERS, resolve them now in our
6497 // internal state to the actual values.
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006498 if (range->levelCount == VK_REMAINING_MIP_LEVELS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006499 range->levelCount = image_state->createInfo.mipLevels - range->baseMipLevel;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006500 }
6501
6502 if (range->layerCount == VK_REMAINING_ARRAY_LAYERS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006503 range->layerCount = image_state->createInfo.arrayLayers - range->baseArrayLayer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006504 }
6505 }
6506}
6507
6508// Return the correct layer/level counts if the caller used the special
6509// values VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS.
6510static void ResolveRemainingLevelsLayers(layer_data *dev_data, uint32_t *levels, uint32_t *layers, VkImageSubresourceRange range,
6511 VkImage image) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -07006512 // Expects global_lock to be held by caller
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006513
6514 *levels = range.levelCount;
6515 *layers = range.layerCount;
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006516 auto image_state = getImageState(dev_data, image);
6517 if (image_state) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006518 if (range.levelCount == VK_REMAINING_MIP_LEVELS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006519 *levels = image_state->createInfo.mipLevels - range.baseMipLevel;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006520 }
6521 if (range.layerCount == VK_REMAINING_ARRAY_LAYERS) {
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006522 *layers = image_state->createInfo.arrayLayers - range.baseArrayLayer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006523 }
6524 }
6525}
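// Worked example for both overloads above: for an image created with
// mipLevels = 10 and arrayLayers = 6, a range of { baseMipLevel = 2,
// levelCount = VK_REMAINING_MIP_LEVELS, baseArrayLayer = 1,
// layerCount = VK_REMAINING_ARRAY_LAYERS } resolves to
// levelCount = 10 - 2 = 8 and layerCount = 6 - 1 = 5.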
6526
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006527 // For the given format, verify that the aspect mask makes sense
6528static bool ValidateImageAspectMask(layer_data *dev_data, VkImage image, VkFormat format, VkImageAspectFlags aspect_mask,
6529 const char *func_name) {
6530 bool skip = false;
6531 if (vk_format_is_color(format)) {
6532 if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
6533 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6534 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6535 "%s: Color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6536 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006537 } else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006538 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6539 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6540 "%s: Color image formats must have ONLY the VK_IMAGE_ASPECT_COLOR_BIT set. %s", func_name,
6541 validation_error_map[VALIDATION_ERROR_00741]);
6542 }
6543 } else if (vk_format_is_depth_and_stencil(format)) {
6544 if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
6545 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6546 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE", "%s: Depth/stencil image formats must have "
6547 "at least one of VK_IMAGE_ASPECT_DEPTH_BIT "
6548 "and VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6549 func_name, validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006550 } else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006551 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6552 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6553 "%s: Combination depth/stencil image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT and "
6554 "VK_IMAGE_ASPECT_STENCIL_BIT set. %s",
6555 func_name, validation_error_map[VALIDATION_ERROR_00741]);
6556 }
6557 } else if (vk_format_is_depth_only(format)) {
6558 if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
6559 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6560 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6561 "%s: Depth-only image formats must have the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6562 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006563 } else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006564 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6565 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6566 "%s: Depth-only image formats can have only the VK_IMAGE_ASPECT_DEPTH_BIT set. %s", func_name,
6567 validation_error_map[VALIDATION_ERROR_00741]);
6568 }
6569 } else if (vk_format_is_stencil_only(format)) {
6570 if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
6571 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6572 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6573 "%s: Stencil-only image formats must have the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6574 validation_error_map[VALIDATION_ERROR_00741]);
Tobin Ehlisa4306ef2017-01-02 10:04:56 -07006575 } else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006576 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
6577 (uint64_t)image, __LINE__, VALIDATION_ERROR_00741, "IMAGE",
6578 "%s: Stencil-only image formats can have only the VK_IMAGE_ASPECT_STENCIL_BIT set. %s", func_name,
6579 validation_error_map[VALIDATION_ERROR_00741]);
6580 }
6581 }
6582 return skip;
6583}
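// Quick reference for the rules enforced above (illustrative formats):
//   VK_FORMAT_R8G8B8A8_UNORM    -> aspectMask must be exactly VK_IMAGE_ASPECT_COLOR_BIT
//   VK_FORMAT_D24_UNORM_S8_UINT -> DEPTH_BIT, STENCIL_BIT, or both, and nothing else
//   VK_FORMAT_D32_SFLOAT        -> exactly VK_IMAGE_ASPECT_DEPTH_BIT
//   VK_FORMAT_S8_UINT           -> exactly VK_IMAGE_ASPECT_STENCIL_BIT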
6584
Tobin Ehlis16239872016-10-26 10:42:49 -06006585static bool PreCallValidateCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info) {
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006586 bool skip = false;
Tobin Ehlis16239872016-10-26 10:42:49 -06006587 IMAGE_STATE *image_state = getImageState(dev_data, create_info->image);
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006588 if (image_state) {
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006589 skip |= ValidateImageUsageFlags(
Tobin Ehlis30df15c2016-10-12 17:17:57 -06006590 dev_data, image_state, VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
6591 VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07006592 false, -1, "vkCreateImageView()",
Tony Barbour311dcbe2016-08-26 13:01:43 -06006593 "VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT]_BIT");
Mark Lobodzinski0dcf2722016-07-14 09:54:11 -06006594 // If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
Tobin Ehlise1995fc2016-12-22 12:45:09 -07006595 skip |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCreateImageView()", VALIDATION_ERROR_02524);
Tobin Ehlis16239872016-10-26 10:42:49 -06006596 // Checks imported from image layer
6597 if (create_info->subresourceRange.baseMipLevel >= image_state->createInfo.mipLevels) {
6598 std::stringstream ss;
6599 ss << "vkCreateImageView called with baseMipLevel " << create_info->subresourceRange.baseMipLevel << " for image "
6600 << create_info->image << " that only has " << image_state->createInfo.mipLevels << " mip levels.";
6601 skip |=
6602 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6603 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6604 }
6605 if (create_info->subresourceRange.baseArrayLayer >= image_state->createInfo.arrayLayers) {
6606 std::stringstream ss;
6607 ss << "vkCreateImageView called with baseArrayLayer " << create_info->subresourceRange.baseArrayLayer << " for image "
6608 << create_info->image << " that only has " << image_state->createInfo.arrayLayers << " array layers.";
6609 skip |=
6610 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6611 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6612 }
6613 // TODO: Need new valid usage language for levelCount == 0 & layerCount == 0
6614 if (!create_info->subresourceRange.levelCount) {
6615 std::stringstream ss;
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006616 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.levelCount.";
Tobin Ehlis16239872016-10-26 10:42:49 -06006617 skip |=
6618 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6619 VALIDATION_ERROR_00768, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00768]);
6620 }
6621 if (!create_info->subresourceRange.layerCount) {
6622 std::stringstream ss;
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006623 ss << "vkCreateImageView called with 0 in pCreateInfo->subresourceRange.layerCount.";
Tobin Ehlis16239872016-10-26 10:42:49 -06006624 skip |=
6625 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6626 VALIDATION_ERROR_00769, "IMAGE", "%s %s", ss.str().c_str(), validation_error_map[VALIDATION_ERROR_00769]);
6627 }
6628
6629 VkImageCreateFlags image_flags = image_state->createInfo.flags;
6630 VkFormat image_format = image_state->createInfo.format;
6631 VkFormat view_format = create_info->format;
6632 VkImageAspectFlags aspect_mask = create_info->subresourceRange.aspectMask;
6633
6634 // Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state
6635 if (image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) {
6636 // Format MUST be compatible with (i.e., in the same format compatibility class as) the format the image was created with
6637 if (vk_format_get_compatibility_class(image_format) != vk_format_get_compatibility_class(view_format)) {
6638 std::stringstream ss;
6639 ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
6640 << " is not in the same format compatibility class as image (" << (uint64_t)create_info->image << ") format "
6641 << string_VkFormat(image_format) << ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT "
6642 << "can support ImageViews with differing formats but they must be in the same compatibility class.";
6643 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6644 VALIDATION_ERROR_02171, "IMAGE", "%s %s", ss.str().c_str(),
6645 validation_error_map[VALIDATION_ERROR_02171]);
6646 }
6647 } else {
6648 // Format MUST be IDENTICAL to the format the image was created with
6649 if (image_format != view_format) {
6650 std::stringstream ss;
6651 ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from image "
6652 << (uint64_t)create_info->image << " format " << string_VkFormat(image_format)
6653 << ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
6654 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6655 VALIDATION_ERROR_02172, "IMAGE", "%s %s", ss.str().c_str(),
6656 validation_error_map[VALIDATION_ERROR_02172]);
6657 }
6658 }
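        // Illustrative examples for the two branches above: VK_FORMAT_R8G8B8A8_UNORM and
        // VK_FORMAT_R8G8B8A8_SRGB share the 32-bit compatibility class, so either view
        // format is legal on a MUTABLE_FORMAT image created with the other, while
        // VK_FORMAT_R8G8B8A8_UNORM and VK_FORMAT_R16G16B16A16_SFLOAT are in different
        // classes and are not interchangeable.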
6659
6660 // Validate correct image aspect bits for desired formats and format consistency
Tobin Ehlis8d79b2e2016-10-26 14:13:46 -06006661 skip |= ValidateImageAspectMask(dev_data, image_state->image, image_format, aspect_mask, "vkCreateImageView()");
Mark Youngd339ba32016-05-30 13:28:35 -06006662 }
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006663 return skip;
Mark Youngd339ba32016-05-30 13:28:35 -06006664}
6665
Tobin Ehlis16239872016-10-26 10:42:49 -06006666static inline void PostCallRecordCreateImageView(layer_data *dev_data, const VkImageViewCreateInfo *create_info, VkImageView view) {
6667 dev_data->imageViewMap[view] = unique_ptr<IMAGE_VIEW_STATE>(new IMAGE_VIEW_STATE(view, create_info));
6668 ResolveRemainingLevelsLayers(dev_data, &dev_data->imageViewMap[view].get()->create_info.subresourceRange, create_info->image);
Mark Youngd339ba32016-05-30 13:28:35 -06006669}
6670
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006671VKAPI_ATTR VkResult VKAPI_CALL CreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
6672 const VkAllocationCallbacks *pAllocator, VkImageView *pView) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006673 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Mark Youngd339ba32016-05-30 13:28:35 -06006674 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006675 bool skip = PreCallValidateCreateImageView(dev_data, pCreateInfo);
Mark Youngd339ba32016-05-30 13:28:35 -06006676 lock.unlock();
Tobin Ehlis4ad9d852016-10-26 08:04:58 -06006677 if (skip)
Mark Youngd339ba32016-05-30 13:28:35 -06006678 return VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbesaaa9c282016-10-03 20:01:14 +13006679 VkResult result = dev_data->dispatch_table.CreateImageView(device, pCreateInfo, pAllocator, pView);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006680 if (VK_SUCCESS == result) {
Mark Youngd339ba32016-05-30 13:28:35 -06006681 lock.lock();
Tobin Ehlis8b26a382016-09-14 08:02:49 -06006682 PostCallRecordCreateImageView(dev_data, pCreateInfo, *pView);
Mark Youngd339ba32016-05-30 13:28:35 -06006683 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006684 }
Chris Forbes86c586a2016-05-08 10:19:14 +12006685
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006686 return result;
6687}
6688
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006689VKAPI_ATTR VkResult VKAPI_CALL
6690CreateFence(VkDevice device, const VkFenceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkFence *pFence) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006691 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006692 VkResult result = dev_data->dispatch_table.CreateFence(device, pCreateInfo, pAllocator, pFence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006693 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006694 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06006695 auto &fence_node = dev_data->fenceMap[*pFence];
Chris Forbesbc0b8212016-06-10 15:22:37 +12006696 fence_node.fence = *pFence;
Tobin Ehlisaff7ae92016-04-18 15:45:20 -06006697 fence_node.createInfo = *pCreateInfo;
Chris Forbesff96dcd2016-06-16 11:47:24 +12006698 fence_node.state = (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) ? FENCE_RETIRED : FENCE_UNSIGNALED;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006699 }
6700 return result;
6701}
6702
6703// TODO handle pipeline caches
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006704VKAPI_ATTR VkResult VKAPI_CALL CreatePipelineCache(VkDevice device, const VkPipelineCacheCreateInfo *pCreateInfo,
6705 const VkAllocationCallbacks *pAllocator, VkPipelineCache *pPipelineCache) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006706 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006707 VkResult result = dev_data->dispatch_table.CreatePipelineCache(device, pCreateInfo, pAllocator, pPipelineCache);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006708 return result;
6709}
6710
6711VKAPI_ATTR void VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006712DestroyPipelineCache(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006713 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006714 dev_data->dispatch_table.DestroyPipelineCache(device, pipelineCache, pAllocator);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006715}
6716
6717VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006718GetPipelineCacheData(VkDevice device, VkPipelineCache pipelineCache, size_t *pDataSize, void *pData) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006719 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006720 VkResult result = dev_data->dispatch_table.GetPipelineCacheData(device, pipelineCache, pDataSize, pData);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006721 return result;
6722}
6723
6724VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006725MergePipelineCaches(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006726 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006727 VkResult result = dev_data->dispatch_table.MergePipelineCaches(device, dstCache, srcCacheCount, pSrcCaches);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006728 return result;
6729}
6730
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06006731// utility function to set collective state for pipeline
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006732void set_pipeline_state(PIPELINE_STATE *pPipe) {
Tobin Ehlis7a1d2352016-03-28 11:18:19 -06006733 // If any attachment used by this pipeline has blendEnable, set top-level blendEnable
6734 if (pPipe->graphicsPipelineCI.pColorBlendState) {
6735 for (size_t i = 0; i < pPipe->attachments.size(); ++i) {
6736 if (VK_TRUE == pPipe->attachments[i].blendEnable) {
6737 if (((pPipe->attachments[i].dstAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6738 (pPipe->attachments[i].dstAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6739 ((pPipe->attachments[i].dstColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6740 (pPipe->attachments[i].dstColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6741 ((pPipe->attachments[i].srcAlphaBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6742 (pPipe->attachments[i].srcAlphaBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA)) ||
6743 ((pPipe->attachments[i].srcColorBlendFactor >= VK_BLEND_FACTOR_CONSTANT_COLOR) &&
6744 (pPipe->attachments[i].srcColorBlendFactor <= VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA))) {
6745 pPipe->blendConstantsEnabled = true;
6746 }
6747 }
6748 }
6749 }
6750}
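// Illustrative consequence of blendConstantsEnabled (app-side, assumed usage):
// a pipeline whose attachment uses, e.g., dstColorBlendFactor =
// VK_BLEND_FACTOR_CONSTANT_COLOR reads the dynamic blend-constant state, so
// the app is expected to provide it before drawing:
//
//     const float blend_constants[4] = {1.0f, 0.5f, 0.25f, 1.0f};
//     vkCmdSetBlendConstants(cmd_buffer, blend_constants);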
6751
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006752static bool PreCallCreateGraphicsPipelines(layer_data *device_data, uint32_t count,
6753 const VkGraphicsPipelineCreateInfo *create_infos, vector<PIPELINE_STATE *> &pipe_state) {
6754 bool skip = false;
Mark Lobodzinskica60e142016-11-16 11:12:30 -07006755 instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(device_data->instance_data->instance), instance_layer_data_map);
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006756
6757 for (uint32_t i = 0; i < count; i++) {
6758 skip |= verifyPipelineCreateState(device_data, pipe_state, i);
Mark Lobodzinskica60e142016-11-16 11:12:30 -07006759 if (create_infos[i].pVertexInputState != NULL) {
6760 for (uint32_t j = 0; j < create_infos[i].pVertexInputState->vertexAttributeDescriptionCount; j++) {
6761 VkFormat format = create_infos[i].pVertexInputState->pVertexAttributeDescriptions[j].format;
6762 // Internal call to get format info. Still goes through layers, could potentially go directly to ICD.
6763 VkFormatProperties properties;
6764 instance_data->dispatch_table.GetPhysicalDeviceFormatProperties(device_data->physical_device, format, &properties);
6765 if ((properties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) == 0) {
6766 skip |= log_msg(
6767 device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
6768 __LINE__, VALIDATION_ERROR_01413, "IMAGE",
6769 "vkCreateGraphicsPipelines: pCreateInfo[%d].pVertexInputState->vertexAttributeDescriptions[%d].format "
6770 "(%s) is not a supported vertex buffer format. %s",
6771 i, j, string_VkFormat(format), validation_error_map[VALIDATION_ERROR_01413]);
6772 }
6773 }
6774 }
Mark Lobodzinski7f170b32016-11-16 10:05:30 -07006775 }
6776 return skip;
6777}
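// Illustrative failure case for the vertex-format check above: declaring a
// vertex attribute with a format whose VkFormatProperties.bufferFeatures lacks
// VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT on the current device (depth formats
// such as VK_FORMAT_D32_SFLOAT are a common example) produces
// VALIDATION_ERROR_01413.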
6778
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006779VKAPI_ATTR VkResult VKAPI_CALL
6780CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6781 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6782 VkPipeline *pPipelines) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006783 // TODO What to do with pipelineCache?
6784 // The order of operations here is a little convoluted but gets the job done
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006785 // 1. Pipeline create state is first shadowed into PIPELINE_STATE struct
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006786 // 2. Create state is then validated (which uses flags setup during shadowing)
6787 // 3. If everything looks good, we'll then create the pipeline and add NODE to pipelineMap
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006788 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006789 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006790 vector<PIPELINE_STATE *> pipe_state(count);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006791 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6792
6793 uint32_t i = 0;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006794 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006795
6796 for (i = 0; i < count; i++) {
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006797 pipe_state[i] = new PIPELINE_STATE;
6798 pipe_state[i]->initGraphicsPipeline(&pCreateInfos[i]);
6799 pipe_state[i]->render_pass_ci.initialize(getRenderPassState(dev_data, pCreateInfos[i].renderPass)->createInfo.ptr());
6800 pipe_state[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006801 }
Mark Lobodzinski1f34f6f2016-11-16 10:11:02 -07006802 skip |= PreCallCreateGraphicsPipelines(dev_data, count, pCreateInfos, pipe_state);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006803
Chris Forbes56754e22016-11-30 14:24:32 +13006804 if (skip) {
6805 for (i = 0; i < count; i++) {
6806 delete pipe_state[i];
Chris Forbes83e91ad2016-11-30 14:26:50 +13006807 pPipelines[i] = VK_NULL_HANDLE;
Chris Forbes56754e22016-11-30 14:24:32 +13006808 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006809 return VK_ERROR_VALIDATION_FAILED_EXT;
6810 }
6811
6812 lock.unlock();
6813 auto result = dev_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6814 lock.lock();
6815 for (i = 0; i < count; i++) {
Chris Forbesb186edf2016-11-30 14:41:35 +13006816 if (pPipelines[i] == VK_NULL_HANDLE) {
6817 delete pipe_state[i];
6818 } else {
6820 pipe_state[i]->pipeline = pPipelines[i];
6821 dev_data->pipelineMap[pipe_state[i]->pipeline] = pipe_state[i];
6822 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006823 }
Chris Forbes56754e22016-11-30 14:24:32 +13006824
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006825 return result;
6826}
6827
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006828VKAPI_ATTR VkResult VKAPI_CALL
6829CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t count,
6830 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
6831 VkPipeline *pPipelines) {
Chris Forbes183f4f92016-11-30 14:35:52 +13006832 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006833
6834 // TODO : Improve this data struct w/ unique_ptrs so cleanup below is automatic
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006835 vector<PIPELINE_STATE *> pPipeState(count);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006836 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
6837
6838 uint32_t i = 0;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006839 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006840 for (i = 0; i < count; i++) {
6841 // TODO: Verify compute stage bits
6842
6843 // Create and initialize internal tracking data structure
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006844 pPipeState[i] = new PIPELINE_STATE;
6845 pPipeState[i]->initComputePipeline(&pCreateInfos[i]);
Tobin Ehlisc1d9be12016-10-13 10:18:18 -06006846 pPipeState[i]->pipeline_layout = *getPipelineLayout(dev_data, pCreateInfos[i].layout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006847
6848 // TODO: Add Compute Pipeline Verification
Chris Forbes183f4f92016-11-30 14:35:52 +13006849 skip |= !validate_compute_pipeline(dev_data->report_data, pPipeState[i], &dev_data->enabled_features,
Tobin Ehlisfe871282016-06-28 10:28:02 -06006850 dev_data->shaderModuleMap);
Chris Forbes183f4f92016-11-30 14:35:52 +13006851 // skip |= verifyPipelineCreateState(dev_data, pPipeState[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006852 }
6853
Chris Forbes78a69c62016-11-30 14:39:24 +13006854 if (skip) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006855 for (i = 0; i < count; i++) {
6856 // Clean up any locally allocated data structures
Tobin Ehlis52c76a32016-10-12 09:05:51 -06006857 delete pPipeState[i];
Chris Forbes3224f952016-11-30 14:44:03 +13006858 pPipelines[i] = VK_NULL_HANDLE;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006859 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006860 return VK_ERROR_VALIDATION_FAILED_EXT;
6861 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006862
6863 lock.unlock();
6864 auto result = dev_data->dispatch_table.CreateComputePipelines(device, pipelineCache, count, pCreateInfos, pAllocator, pPipelines);
6865 lock.lock();
6866 for (i = 0; i < count; i++) {
Chris Forbes3224f952016-11-30 14:44:03 +13006867 if (pPipelines[i] == VK_NULL_HANDLE) {
6868 delete pPipeState[i];
6869 } else {
6871 pPipeState[i]->pipeline = pPipelines[i];
6872 dev_data->pipelineMap[pPipeState[i]->pipeline] = pPipeState[i];
6873 }
Chris Forbes78a69c62016-11-30 14:39:24 +13006874 }
6875
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006876 return result;
6877}
6878
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006879VKAPI_ATTR VkResult VKAPI_CALL CreateSampler(VkDevice device, const VkSamplerCreateInfo *pCreateInfo,
6880 const VkAllocationCallbacks *pAllocator, VkSampler *pSampler) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006881 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +13006882 VkResult result = dev_data->dispatch_table.CreateSampler(device, pCreateInfo, pAllocator, pSampler);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006883 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06006884 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisfad7adf2016-10-20 06:50:37 -06006885 dev_data->samplerMap[*pSampler] = unique_ptr<SAMPLER_STATE>(new SAMPLER_STATE(pSampler, pCreateInfo));
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006886 }
6887 return result;
6888}
6889
Tobin Ehlis154c2692016-10-25 09:36:53 -06006890static bool PreCallValidateCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info) {
6891 if (dev_data->instance_data->disabled.create_descriptor_set_layout)
6892 return false;
6893 return cvdescriptorset::DescriptorSetLayout::ValidateCreateInfo(dev_data->report_data, create_info);
6894}
6895
6896static void PostCallRecordCreateDescriptorSetLayout(layer_data *dev_data, const VkDescriptorSetLayoutCreateInfo *create_info,
6897 VkDescriptorSetLayout set_layout) {
Tobin Ehlisfdcb63f2016-10-25 20:56:47 -06006898 // TODO: Convert this to unique_ptr to avoid leaks
Tobin Ehlis154c2692016-10-25 09:36:53 -06006899 dev_data->descriptorSetLayoutMap[set_layout] = new cvdescriptorset::DescriptorSetLayout(create_info, set_layout);
6900}
6901
Chia-I Wu629d7cd2016-05-06 11:32:54 +08006902VKAPI_ATTR VkResult VKAPI_CALL
6903CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
6904 const VkAllocationCallbacks *pAllocator, VkDescriptorSetLayout *pSetLayout) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006905 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis154c2692016-10-25 09:36:53 -06006906 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
6907 std::unique_lock<std::mutex> lock(global_lock);
6908 bool skip = PreCallValidateCreateDescriptorSetLayout(dev_data, pCreateInfo);
6909 if (!skip) {
6910 lock.unlock();
6911 result = dev_data->dispatch_table.CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
6912 if (VK_SUCCESS == result) {
6913 lock.lock();
6914 PostCallRecordCreateDescriptorSetLayout(dev_data, pCreateInfo, *pSetLayout);
6915 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07006916 }
6917 return result;
6918}
6919
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006920// Used by CreatePipelineLayout and CmdPushConstants.
6921// Note that the index argument is optional and only used by CreatePipelineLayout.
6922static bool validatePushConstantRange(const layer_data *dev_data, const uint32_t offset, const uint32_t size,
6923 const char *caller_name, uint32_t index = 0) {
Chris Forbesa13fe522016-10-13 15:34:59 +13006924 if (dev_data->instance_data->disabled.push_constant_range)
Tobin Ehlisf0e83a32016-10-06 14:16:14 -06006925 return false;
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006926 uint32_t const maxPushConstantsSize = dev_data->phys_dev_properties.properties.limits.maxPushConstantsSize;
Tobin Ehlisfe871282016-06-28 10:28:02 -06006927 bool skip_call = false;
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006928 // Check that offset + size don't exceed the max.
6929 // Prevent arithmetic overflow here by avoiding addition and testing in this order.
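    // Worked example: with maxPushConstantsSize = 128, offset = 0xFFFFFFF0 and
    // size = 0x20, a naive 32-bit (offset + size) wraps to 0x10 and would pass a
    // simple sum check. Testing offset first, then (size > maxPushConstantsSize -
    // offset), cannot overflow because the subtraction only runs when offset is
    // already known to be less than maxPushConstantsSize.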
6930 if ((offset >= maxPushConstantsSize) || (size > maxPushConstantsSize - offset)) {
6931 // This is a pain just to adapt the log message to the caller, but better to sort it out only when there is a problem.
6932 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlis2765e462016-11-23 10:47:26 -07006933 if (offset >= maxPushConstantsSize) {
6934 skip_call |=
6935 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6936 VALIDATION_ERROR_00877, "DS", "%s call has push constants index %u with offset %u that "
6937 "exceeds this device's maxPushConstantSize of %u. %s",
6938 caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00877]);
6939 }
6940 if (size > maxPushConstantsSize - offset) {
6941 skip_call |=
6942 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6943 VALIDATION_ERROR_00880, "DS", "%s call has push constants index %u with offset %u and size %u that "
6944 "exceeds this device's maxPushConstantSize of %u. %s",
6945 caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00880]);
6946 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006947 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Dave Houlton197211a2016-12-23 15:26:29 -07006948 if (offset >= maxPushConstantsSize) {
6949 skip_call |=
6950 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6951 VALIDATION_ERROR_00991, "DS", "%s call has push constants index %u with offset %u that "
6952 "exceeds this device's maxPushConstantSize of %u. %s",
6953 caller_name, index, offset, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00991]);
6954 }
6955 if (size > maxPushConstantsSize - offset) {
6956 skip_call |=
6957 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6958 VALIDATION_ERROR_00992, "DS", "%s call has push constants index %u with offset %u and size %u that "
6959 "exceeds this device's maxPushConstantSize of %u. %s",
6960 caller_name, index, offset, size, maxPushConstantsSize, validation_error_map[VALIDATION_ERROR_00992]);
6961 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006962 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06006963 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6964 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006965 }
6966 }
6967 // size needs to be non-zero and a multiple of 4.
6968 if ((size == 0) || ((size & 0x3) != 0)) {
6969 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlisfe699542016-11-23 09:41:12 -07006970 if (size == 0) {
6971 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6972 __LINE__, VALIDATION_ERROR_00878, "DS", "%s call has push constants index %u with "
6973 "size %u. Size must be greater than zero. %s",
6974 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00878]);
6975 }
6976 if (size & 0x3) {
6977 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6978 __LINE__, VALIDATION_ERROR_00879, "DS", "%s call has push constants index %u with "
6979 "size %u. Size must be a multiple of 4. %s",
6980 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00879]);
6981 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006982 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Dave Houlton197211a2016-12-23 15:26:29 -07006983 if (size == 0) {
6984 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6985 __LINE__, VALIDATION_ERROR_01000, "DS", "%s call has push constants index %u with "
6986 "size %u. Size must be greater than zero. %s",
6987 caller_name, index, size, validation_error_map[VALIDATION_ERROR_01000]);
6988 }
6989 if (size & 0x3) {
6990 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
6991 __LINE__, VALIDATION_ERROR_00990, "DS", "%s call has push constants index %u with "
6992 "size %u. Size must be a multiple of 4. %s",
6993 caller_name, index, size, validation_error_map[VALIDATION_ERROR_00990]);
6994 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006995 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06006996 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
6997 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06006998 }
6999 }
7000 // offset needs to be a multiple of 4.
7001 if ((offset & 0x3) != 0) {
7002 if (0 == strcmp(caller_name, "vkCreatePipelineLayout()")) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007003 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007004 VALIDATION_ERROR_02521, "DS", "%s call has push constants index %u with "
7005 "offset %u. Offset must be a multiple of 4. %s",
7006 caller_name, index, offset, validation_error_map[VALIDATION_ERROR_02521]);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007007 } else if (0 == strcmp(caller_name, "vkCmdPushConstants()")) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007008 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007009 VALIDATION_ERROR_00989, "DS", "%s call has push constants with "
7010 "offset %u. Offset must be a multiple of 4. %s",
7011 caller_name, offset, validation_error_map[VALIDATION_ERROR_00989]);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007012 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007013 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7014 DRAWSTATE_INTERNAL_ERROR, "DS", "%s caller not supported.", caller_name);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007015 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007016 }
Tobin Ehlisfe871282016-06-28 10:28:02 -06007017 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007018}
7019
Dave Houlton197211a2016-12-23 15:26:29 -07007020VKAPI_ATTR VkResult VKAPI_CALL
7021CreatePipelineLayout(VkDevice device, const VkPipelineLayoutCreateInfo *pCreateInfo,
Chia-I Wu629d7cd2016-05-06 11:32:54 +08007022 const VkAllocationCallbacks *pAllocator, VkPipelineLayout *pPipelineLayout) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007023 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007024 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisbf98b692016-10-06 12:58:06 -06007025 // TODO : Add checks for VALIDATION_ERRORS 865-871
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007026 // Push Constant Range checks
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007027 uint32_t i, j;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007028 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007029 skip_call |= validatePushConstantRange(dev_data, pCreateInfo->pPushConstantRanges[i].offset,
7030 pCreateInfo->pPushConstantRanges[i].size, "vkCreatePipelineLayout()", i);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007031 if (0 == pCreateInfo->pPushConstantRanges[i].stageFlags) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007032 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Dave Houlton197211a2016-12-23 15:26:29 -07007033 VALIDATION_ERROR_00882, "DS", "vkCreatePipelineLayout() call has no stageFlags set. %s",
7034 validation_error_map[VALIDATION_ERROR_00882]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007035 }
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007036 }
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007037 if (skip_call)
7038 return VK_ERROR_VALIDATION_FAILED_EXT;
7039
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007040 // Each range has been validated. Now check for overlap between ranges (if they are good).
Karl Schultzb7c6d0f2016-09-13 14:23:19 -06007041 // There's no explicit Valid Usage language against this, so issue a warning instead of an error.
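    // Worked example of the half-open interval test below: ranges [0, 16) and
    // [8, 24) overlap (minA <= minB && maxA > minB) and warn, while [0, 16) and
    // [16, 32) merely touch, satisfy neither clause, and pass silently.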
7042 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
7043 for (j = i + 1; j < pCreateInfo->pushConstantRangeCount; ++j) {
7044 const uint32_t minA = pCreateInfo->pPushConstantRanges[i].offset;
7045 const uint32_t maxA = minA + pCreateInfo->pPushConstantRanges[i].size;
7046 const uint32_t minB = pCreateInfo->pPushConstantRanges[j].offset;
7047 const uint32_t maxB = minB + pCreateInfo->pPushConstantRanges[j].size;
7048 if ((minA <= minB && maxA > minB) || (minB <= minA && maxB > minA)) {
7049 skip_call |=
7050 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
7051 DRAWSTATE_PUSH_CONSTANTS_ERROR, "DS", "vkCreatePipelineLayout() call has push constants with "
7052 "overlapping ranges: %u:[%u, %u), %u:[%u, %u)",
7053 i, minA, maxA, j, minB, maxB);
Karl Schultzfc8eaf12016-05-06 13:56:42 -06007054 }
7055 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007056 }
Chris Forbescf7615e2016-05-10 16:02:49 +12007057
Chris Forbesaaa9c282016-10-03 20:01:14 +13007058 VkResult result = dev_data->dispatch_table.CreatePipelineLayout(device, pCreateInfo, pAllocator, pPipelineLayout);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007059 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007060 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007061 PIPELINE_LAYOUT_NODE &plNode = dev_data->pipelineLayoutMap[*pPipelineLayout];
Tobin Ehlis0fc85672016-07-07 11:06:26 -06007062 plNode.layout = *pPipelineLayout;
Tobin Ehlis3df41292016-07-07 09:23:38 -06007063 plNode.set_layouts.resize(pCreateInfo->setLayoutCount);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007064 for (i = 0; i < pCreateInfo->setLayoutCount; ++i) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06007065 plNode.set_layouts[i] = getDescriptorSetLayout(dev_data, pCreateInfo->pSetLayouts[i]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007066 }
Tobin Ehlis3df41292016-07-07 09:23:38 -06007067 plNode.push_constant_ranges.resize(pCreateInfo->pushConstantRangeCount);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007068 for (i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
Tobin Ehlis3df41292016-07-07 09:23:38 -06007069 plNode.push_constant_ranges[i] = pCreateInfo->pPushConstantRanges[i];
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007070 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007071 }
7072 return result;
7073}
7074
VKAPI_ATTR VkResult VKAPI_CALL
CreateDescriptorPool(VkDevice device, const VkDescriptorPoolCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                     VkDescriptorPool *pDescriptorPool) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateDescriptorPool(device, pCreateInfo, pAllocator, pDescriptorPool);
    if (VK_SUCCESS == result) {
        if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                    (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS", "Created Descriptor Pool 0x%" PRIxLEAST64,
                    (uint64_t)*pDescriptorPool))
            return VK_ERROR_VALIDATION_FAILED_EXT;
        DESCRIPTOR_POOL_STATE *pNewNode = new DESCRIPTOR_POOL_STATE(*pDescriptorPool, pCreateInfo);
        if (NULL == pNewNode) {
            if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                        (uint64_t)*pDescriptorPool, __LINE__, DRAWSTATE_OUT_OF_MEMORY, "DS",
                        "Out of memory while attempting to allocate DESCRIPTOR_POOL_STATE in vkCreateDescriptorPool()"))
                return VK_ERROR_VALIDATION_FAILED_EXT;
        } else {
            std::lock_guard<std::mutex> lock(global_lock);
            dev_data->descriptorPoolMap[*pDescriptorPool] = pNewNode;
        }
    } else {
        // TODO: Is any state update needed if pool creation fails?
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags) {
    // TODO : Add checks for VALIDATION_ERROR_00928
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.ResetDescriptorPool(device, descriptorPool, flags);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        clearDescriptorPool(dev_data, device, descriptorPool, flags);
    }
    return result;
}
// Ensure the pool contains enough descriptors and descriptor sets to satisfy
// an allocation request. Fills common_data with the total number of descriptors of each type required,
// as well as DescriptorSetLayout ptrs used for later update.
static bool PreCallValidateAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                  cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    if (dev_data->instance_data->disabled.allocate_descriptor_sets)
        return false;
    // All state checks for AllocateDescriptorSets are done in a single function
    return cvdescriptorset::ValidateAllocateDescriptorSets(dev_data->report_data, pAllocateInfo, dev_data, common_data);
}
// Allocation state was good and the call down the chain was made, so update state based on allocating descriptor sets
static void PostCallRecordAllocateDescriptorSets(layer_data *dev_data, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                 VkDescriptorSet *pDescriptorSets,
                                                 const cvdescriptorset::AllocateDescriptorSetsData *common_data) {
    // All the updates are contained in a single cvdescriptorset function
    cvdescriptorset::PerformAllocateDescriptorSets(pAllocateInfo, pDescriptorSets, common_data, &dev_data->descriptorPoolMap,
                                                   &dev_data->setMap, dev_data);
}

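// Illustrative sketch (an assumption, not the actual cvdescriptorset::ValidateAllocateDescriptorSets
// implementation): conceptually, the allocate-time check verifies pool capacity against the per-type
// totals gathered in common_data, mirroring the bookkeeping that PostCallRecordFreeDescriptorSets
// below returns to the pool. The helper name and the required_by_type field are hypothetical.
//
//     static bool PoolHasCapacity(const DESCRIPTOR_POOL_STATE *pool, const uint32_t *required_by_type,
//                                 uint32_t set_count) {
//         if (pool->availableSets < set_count) return false;  // not enough free sets in the pool
//         for (uint32_t type = 0; type < VK_DESCRIPTOR_TYPE_RANGE_SIZE; ++type) {
//             if (pool->availableDescriptorTypeCount[type] < required_by_type[type])
//                 return false;  // not enough descriptors of this type remain
//         }
//         return true;
//     }
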
VKAPI_ATTR VkResult VKAPI_CALL
AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo, VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    cvdescriptorset::AllocateDescriptorSetsData common_data(pAllocateInfo->descriptorSetCount);
    bool skip_call = PreCallValidateAllocateDescriptorSets(dev_data, pAllocateInfo, &common_data);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordAllocateDescriptorSets(dev_data, pAllocateInfo, pDescriptorSets, &common_data);
        lock.unlock();
    }
    return result;
}
// Verify state before freeing DescriptorSets
static bool PreCallValidateFreeDescriptorSets(const layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                              const VkDescriptorSet *descriptor_sets) {
    if (dev_data->instance_data->disabled.free_descriptor_sets)
        return false;
    bool skip_call = false;
    // First make sure sets being destroyed are not currently in-use
    for (uint32_t i = 0; i < count; ++i)
        skip_call |= validateIdleDescriptorSet(dev_data, descriptor_sets[i], "vkFreeDescriptorSets");

    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    if (pool_state && !(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT & pool_state->createInfo.flags)) {
        // Can't Free from a NON_FREE pool
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT,
                             reinterpret_cast<uint64_t &>(pool), __LINE__, VALIDATION_ERROR_00922, "DS",
                             "It is invalid to call vkFreeDescriptorSets() with a pool created without setting "
                             "VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. %s",
                             validation_error_map[VALIDATION_ERROR_00922]);
    }
    return skip_call;
}
// Sets have been removed from the pool so update underlying state
static void PostCallRecordFreeDescriptorSets(layer_data *dev_data, VkDescriptorPool pool, uint32_t count,
                                             const VkDescriptorSet *descriptor_sets) {
    DESCRIPTOR_POOL_STATE *pool_state = getDescriptorPoolState(dev_data, pool);
    if (!pool_state)
        return; // Guard against an unknown pool handle so we don't dereference null below
    // Update available descriptor sets in pool
    pool_state->availableSets += count;

    // For each freed descriptor add its resources back into the pool as available and remove from pool and setMap
    for (uint32_t i = 0; i < count; ++i) {
        auto descriptor_set = dev_data->setMap[descriptor_sets[i]];
        uint32_t type_index = 0, descriptor_count = 0;
        for (uint32_t j = 0; j < descriptor_set->GetBindingCount(); ++j) {
            type_index = static_cast<uint32_t>(descriptor_set->GetTypeFromIndex(j));
            descriptor_count = descriptor_set->GetDescriptorCountFromIndex(j);
            pool_state->availableDescriptorTypeCount[type_index] += descriptor_count;
        }
        freeDescriptorSet(dev_data, descriptor_set);
        pool_state->sets.erase(descriptor_set);
    }
}
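// Example (illustrative): freeing one set whose layout holds 3 UNIFORM_BUFFER descriptors and
// 1 COMBINED_IMAGE_SAMPLER descriptor gives the pool back availableSets += 1,
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER] += 3, and
// availableDescriptorTypeCount[VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER] += 1.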

VKAPI_ATTR VkResult VKAPI_CALL
FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count, const VkDescriptorSet *pDescriptorSets) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    // Make sure that no sets being destroyed are in-flight
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordFreeDescriptorSets(dev_data, descriptorPool, count, pDescriptorSets);
        lock.unlock();
    }
    return result;
}
// TODO : This is a proof-of-concept for the core validation architecture.
// Eventually we'll want to break these functions out into separate files, but
// they are kept together here to prove out the design.
// PreCallValidate* handles validating all of the state prior to calling down the chain to UpdateDescriptorSets()
static bool PreCallValidateUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    if (dev_data->instance_data->disabled.update_descriptor_sets)
        return false;
    // First thing to do is perform map look-ups.
    // NOTE : UpdateDescriptorSets is somewhat unique in that it operates on a number of DescriptorSets,
    // so we can't just do a single map look-up up-front; the look-ups are done individually in the functions below.

    // Now make call(s) that validate state, but don't perform state updates in this function.
    // Note that DescriptorSets are unique in that we don't yet have a class instance here, so we use a helper
    // function in the namespace that parses the params and makes calls into specific class instances.
    return cvdescriptorset::ValidateUpdateDescriptorSets(dev_data->report_data, dev_data, descriptorWriteCount, pDescriptorWrites,
                                                         descriptorCopyCount, pDescriptorCopies);
}
// PostCallRecord* handles recording state updates following the call down the chain to UpdateDescriptorSets()
static void PostCallRecordUpdateDescriptorSets(layer_data *dev_data, uint32_t descriptorWriteCount,
                                               const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                               const VkCopyDescriptorSet *pDescriptorCopies) {
    cvdescriptorset::PerformUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                 pDescriptorCopies);
}

VKAPI_ATTR void VKAPI_CALL
UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet *pDescriptorWrites,
                     uint32_t descriptorCopyCount, const VkCopyDescriptorSet *pDescriptorCopies) {
    // The only map look-up at the top level is for the device-level layer_data
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                         pDescriptorCopies);
    lock.unlock();
    if (!skip_call) {
        dev_data->dispatch_table.UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                                      pDescriptorCopies);
        lock.lock();
        // Since UpdateDescriptorSets() is void, nothing to check prior to updating state
        PostCallRecordUpdateDescriptorSets(dev_data, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
                                           pDescriptorCopies);
    }
}

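// Illustrative sketch (not an actual entry point in this layer): the intercepted calls above and
// below follow a common pattern -- validate under the global lock, release the lock, dispatch down
// the layer chain, then re-acquire the lock to record state. "SomeCommand" is a hypothetical name.
//
//     VKAPI_ATTR void VKAPI_CALL SomeCommand(VkDevice device /*, ... */) {
//         layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//         std::unique_lock<std::mutex> lock(global_lock);
//         bool skip_call = PreCallValidateSomeCommand(dev_data /*, ... */);  // read-only checks
//         lock.unlock();
//         if (!skip_call) {
//             dev_data->dispatch_table.SomeCommand(device /*, ... */);       // call down the chain
//             lock.lock();
//             PostCallRecordSomeCommand(dev_data /*, ... */);                // record state updates
//         }
//     }
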
VKAPI_ATTR VkResult VKAPI_CALL
AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pCreateInfo, VkCommandBuffer *pCommandBuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.AllocateCommandBuffers(device, pCreateInfo, pCommandBuffer);
    if (VK_SUCCESS == result) {
        std::unique_lock<std::mutex> lock(global_lock);
        auto pPool = getCommandPoolNode(dev_data, pCreateInfo->commandPool);

        if (pPool) {
            for (uint32_t i = 0; i < pCreateInfo->commandBufferCount; i++) {
                // Add command buffer to its commandPool map
                pPool->commandBuffers.push_back(pCommandBuffer[i]);
                GLOBAL_CB_NODE *pCB = new GLOBAL_CB_NODE;
                // Add command buffer to map
                dev_data->commandBufferMap[pCommandBuffer[i]] = pCB;
                resetCB(dev_data, pCommandBuffer[i]);
                pCB->createInfo = *pCreateInfo;
                pCB->device = device;
            }
        }
        lock.unlock();
    }
    return result;
}

// Add bindings between the given cmd buffer & framebuffer and the framebuffer's children
static void AddFramebufferBinding(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, FRAMEBUFFER_STATE *fb_state) {
    addCommandBufferBinding(&fb_state->cb_bindings,
                            {reinterpret_cast<uint64_t &>(fb_state->framebuffer), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT},
                            cb_state);
    for (auto attachment : fb_state->attachments) {
        auto view_state = attachment.view_state;
        if (view_state) {
            AddCommandBufferBindingImageView(dev_data, cb_state, view_state);
        }
    }
    // The render pass look-up is loop-invariant, so bind it once rather than once per attachment
    auto rp_state = getRenderPassState(dev_data, fb_state->createInfo.renderPass);
    if (rp_state) {
        addCommandBufferBinding(&rp_state->cb_bindings,
                                {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT},
                                cb_state);
    }
}
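// Example (illustrative): if framebuffer FB was created with image views V0 and V1 and render pass RP,
// binding FB to command buffer CB links CB to FB, to V0 and V1 (and, via AddCommandBufferBindingImageView,
// presumably to the views' backing images), and to RP, so destroying any of those objects invalidates CB.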

VKAPI_ATTR VkResult VKAPI_CALL
BeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // Validate command buffer level
    GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        // This implicitly resets the Cmd Buffer so make sure any fence is done and then clear memory references
        if (dev_data->globalInFlightCmdBuffers.count(commandBuffer)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00104, "MEM",
                        "Calling vkBeginCommandBuffer() on active command buffer 0x%p before it has completed. "
                        "You must check command buffer fence before this call. %s",
                        commandBuffer, validation_error_map[VALIDATION_ERROR_00104]);
        }
        clear_cmd_buf_and_mem_references(dev_data, cb_node);
        if (cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
            // Secondary Command Buffer
            const VkCommandBufferInheritanceInfo *pInfo = pBeginInfo->pInheritanceInfo;
            if (!pInfo) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00106, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must have inheritance info. %s",
                            commandBuffer, validation_error_map[VALIDATION_ERROR_00106]);
            } else {
                if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
                    // Object_tracker makes sure these objects are valid
                    assert(pInfo->renderPass);
                    assert(pInfo->framebuffer);
                    string errorString = "";
                    auto framebuffer = getFramebufferState(dev_data, pInfo->framebuffer);
                    if (framebuffer) {
                        if ((framebuffer->createInfo.renderPass != pInfo->renderPass) &&
                            !verify_renderpass_compatibility(dev_data, framebuffer->renderPassCreateInfo.ptr(),
                                                             getRenderPassState(dev_data, pInfo->renderPass)->createInfo.ptr(),
                                                             errorString)) {
                            // renderPass that framebuffer was created with must be compatible with local renderPass
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                                                 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00112, "DS",
                                                 "vkBeginCommandBuffer(): Secondary Command "
                                                 "Buffer (0x%p) renderPass (0x%" PRIxLEAST64 ") is incompatible w/ framebuffer "
                                                 "(0x%" PRIxLEAST64 ") w/ render pass (0x%" PRIxLEAST64 ") due to: %s. %s",
                                                 commandBuffer, reinterpret_cast<const uint64_t &>(pInfo->renderPass),
                                                 reinterpret_cast<const uint64_t &>(pInfo->framebuffer),
                                                 reinterpret_cast<uint64_t &>(framebuffer->createInfo.renderPass),
                                                 errorString.c_str(), validation_error_map[VALIDATION_ERROR_00112]);
                        }
                        // Connect this framebuffer and its children to this cmdBuffer
                        AddFramebufferBinding(dev_data, cb_node, framebuffer);
                    }
                }
                if ((pInfo->occlusionQueryEnable == VK_FALSE || dev_data->enabled_features.occlusionQueryPrecise == VK_FALSE) &&
                    (pInfo->queryFlags & VK_QUERY_CONTROL_PRECISE_BIT)) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(commandBuffer),
                                         __LINE__, VALIDATION_ERROR_00107, "DS",
                                         "vkBeginCommandBuffer(): Secondary Command Buffer (0x%p) must not have "
                                         "VK_QUERY_CONTROL_PRECISE_BIT if occlusionQuery is disabled or the device does not "
                                         "support precise occlusion queries. %s",
                                         commandBuffer, validation_error_map[VALIDATION_ERROR_00107]);
                }
            }
            if (pInfo && pInfo->renderPass != VK_NULL_HANDLE) {
                auto renderPass = getRenderPassState(dev_data, pInfo->renderPass);
                if (renderPass) {
                    if (pInfo->subpass >= renderPass->createInfo.subpassCount) {
                        skip_call |= log_msg(
                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                            VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                            VALIDATION_ERROR_00111, "DS",
                            "vkBeginCommandBuffer(): Secondary Command Buffers (0x%p) must have a subpass index (%d) "
                            "that is less than the number of subpasses (%d). %s",
                            commandBuffer, pInfo->subpass, renderPass->createInfo.subpassCount,
                            validation_error_map[VALIDATION_ERROR_00111]);
                    }
                }
            }
        }
        if (CB_RECORDING == cb_node->state) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00103, "DS",
                        "vkBeginCommandBuffer(): Cannot call Begin on command buffer (0x%p"
                        ") in the RECORDING state. Must first call vkEndCommandBuffer(). %s",
                        commandBuffer, validation_error_map[VALIDATION_ERROR_00103]);
        } else if (CB_RECORDED == cb_node->state || (CB_INVALID == cb_node->state && CMD_END == cb_node->last_cmd)) {
            VkCommandPool cmdPool = cb_node->createInfo.commandPool;
            auto pPool = getCommandPoolNode(dev_data, cmdPool);
            if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00105, "DS",
                            "Call to vkBeginCommandBuffer() on command buffer (0x%p"
                            ") attempts to implicitly reset cmdBuffer created from command pool (0x%" PRIxLEAST64
                            ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                            commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00105]);
            }
            resetCB(dev_data, commandBuffer);
        }
        // Set updated state here in case implicit reset occurs above
        cb_node->state = CB_RECORDING;
        cb_node->beginInfo = *pBeginInfo;
        if (cb_node->beginInfo.pInheritanceInfo) {
            cb_node->inheritanceInfo = *(cb_node->beginInfo.pInheritanceInfo);
            cb_node->beginInfo.pInheritanceInfo = &cb_node->inheritanceInfo;
            // If we are a secondary command buffer and inheriting, update the items we should inherit
            if ((cb_node->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) &&
                (cb_node->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                cb_node->activeRenderPass = getRenderPassState(dev_data, cb_node->beginInfo.pInheritanceInfo->renderPass);
                cb_node->activeSubpass = cb_node->beginInfo.pInheritanceInfo->subpass;
                cb_node->activeFramebuffer = cb_node->beginInfo.pInheritanceInfo->framebuffer;
                cb_node->framebuffers.insert(cb_node->beginInfo.pInheritanceInfo->framebuffer);
            }
        }
    }
    lock.unlock();
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.BeginCommandBuffer(commandBuffer, pBeginInfo);

    return result;
}

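// Illustrative usage (application side, not layer code): beginning a secondary command buffer that
// will be executed inside a render pass, satisfying the checks above. The handles render_pass,
// framebuffer, and secondary_cb are assumed to exist.
//
//     VkCommandBufferInheritanceInfo inherit = {};
//     inherit.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
//     inherit.renderPass = render_pass;   // must be compatible with the framebuffer's render pass
//     inherit.subpass = 0;                // must be less than the render pass's subpassCount
//     inherit.framebuffer = framebuffer;  // may be VK_NULL_HANDLE if not known up front
//
//     VkCommandBufferBeginInfo begin_info = {};
//     begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
//     begin_info.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//     begin_info.pInheritanceInfo = &inherit;  // required for secondary command buffers
//
//     vkBeginCommandBuffer(secondary_cb, &begin_info);
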
VKAPI_ATTR VkResult VKAPI_CALL EndCommandBuffer(VkCommandBuffer commandBuffer) {
    bool skip_call = false;
    VkResult result = VK_SUCCESS;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if ((VK_COMMAND_BUFFER_LEVEL_PRIMARY == pCB->createInfo.level) ||
            !(pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
            // This needs spec clarification to update valid usage, see comments in PR:
            // https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/pull/516#discussion_r63013756
            skip_call |= insideRenderPass(dev_data, pCB, "vkEndCommandBuffer()", VALIDATION_ERROR_00123);
        }
        skip_call |= ValidateCmd(dev_data, pCB, CMD_END, "vkEndCommandBuffer()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_END);
        for (auto query : pCB->activeQueries) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00124, "DS",
                                 "Ending command buffer with in progress query: queryPool 0x%" PRIx64 ", index %d. %s",
                                 (uint64_t)(query.pool), query.index, validation_error_map[VALIDATION_ERROR_00124]);
        }
    }
    if (!skip_call) {
        lock.unlock();
        result = dev_data->dispatch_table.EndCommandBuffer(commandBuffer);
        lock.lock();
        if ((VK_SUCCESS == result) && pCB) { // Guard pCB: an unknown command buffer handle yields a null node
            pCB->state = CB_RECORDED;
            // Reset CB status flags
            pCB->status = 0;
        }
    } else {
        result = VK_ERROR_VALIDATION_FAILED_EXT;
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL
ResetCommandBuffer(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    assert(pCB); // Object_tracker should have rejected an invalid command buffer handle before we get here
    VkCommandPool cmdPool = pCB->createInfo.commandPool;
    auto pPool = getCommandPoolNode(dev_data, cmdPool);
    if (!(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT & pPool->createFlags)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00093, "DS",
                             "Attempt to reset command buffer (0x%p) created from command pool (0x%" PRIxLEAST64
                             ") that does NOT have the VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT bit set. %s",
                             commandBuffer, (uint64_t)cmdPool, validation_error_map[VALIDATION_ERROR_00093]);
    }
    skip_call |= checkCommandBufferInFlight(dev_data, pCB, "reset", VALIDATION_ERROR_00092);
    lock.unlock();
    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = dev_data->dispatch_table.ResetCommandBuffer(commandBuffer, flags);
    if (VK_SUCCESS == result) {
        lock.lock();
        dev_data->globalInFlightCmdBuffers.erase(commandBuffer);
        resetCB(dev_data, commandBuffer);
        lock.unlock();
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL
CmdBindPipeline(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateCmd(dev_data, cb_state, CMD_BINDPIPELINE, "vkCmdBindPipeline()");
        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_BINDPIPELINE);
        if ((VK_PIPELINE_BIND_POINT_COMPUTE == pipelineBindPoint) && (cb_state->activeRenderPass)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                        (uint64_t)pipeline, __LINE__, DRAWSTATE_INVALID_RENDERPASS_CMD, "DS",
                        "Incorrectly binding compute pipeline (0x%" PRIxLEAST64 ") during active RenderPass (0x%" PRIxLEAST64 ")",
                        (uint64_t)pipeline, (uint64_t)cb_state->activeRenderPass->renderPass);
        }
        // TODO: VALIDATION_ERROR_00594 VALIDATION_ERROR_00596

        PIPELINE_STATE *pipe_state = getPipelineState(dev_data, pipeline);
        if (pipe_state) {
            cb_state->lastBound[pipelineBindPoint].pipeline_state = pipe_state;
            set_cb_pso_status(cb_state, pipe_state);
            set_pipeline_state(pipe_state);
            // Only add bindings when the pipeline state exists; dereferencing pipe_state after a
            // failed look-up (as the code previously did outside this branch) would crash
            addCommandBufferBinding(&pipe_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(pipeline), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT}, cb_state);
            if (VK_PIPELINE_BIND_POINT_GRAPHICS == pipelineBindPoint) {
                // Add binding for child renderpass
                auto rp_state = getRenderPassState(dev_data, pipe_state->graphicsPipelineCI.renderPass);
                if (rp_state) {
                    addCommandBufferBinding(
                        &rp_state->cb_bindings,
                        {reinterpret_cast<uint64_t &>(rp_state->renderPass), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT},
                        cb_state);
                }
            }
        } else {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT,
                            (uint64_t)pipeline, __LINE__, VALIDATION_ERROR_00600, "DS",
                            "Attempt to bind Pipeline 0x%" PRIxLEAST64 " that doesn't exist! %s", (uint64_t)(pipeline),
                            validation_error_map[VALIDATION_ERROR_00600]);
        }
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdBindPipeline(commandBuffer, pipelineBindPoint, pipeline);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetViewport(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport *pViewports) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE, "vkCmdSetViewport()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETVIEWPORTSTATE);
        pCB->viewportMask |= ((1u << viewportCount) - 1u) << firstViewport;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetViewport(commandBuffer, firstViewport, viewportCount, pViewports);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetScissor(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D *pScissors) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSCISSORSTATE, "vkCmdSetScissor()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSCISSORSTATE);
        pCB->scissorMask |= ((1u << scissorCount) - 1u) << firstScissor;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetScissor(commandBuffer, firstScissor, scissorCount, pScissors);
}

VKAPI_ATTR void VKAPI_CALL CmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE, "vkCmdSetLineWidth()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETLINEWIDTHSTATE);
        pCB->status |= CBSTATUS_LINE_WIDTH_SET;

        PIPELINE_STATE *pPipeTrav = pCB->lastBound[VK_PIPELINE_BIND_POINT_GRAPHICS].pipeline_state;
        if (pPipeTrav != NULL && !isDynamic(pPipeTrav, VK_DYNAMIC_STATE_LINE_WIDTH)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                 reinterpret_cast<uint64_t &>(commandBuffer), __LINE__, VALIDATION_ERROR_01476, "DS",
                                 "vkCmdSetLineWidth called but pipeline was created without VK_DYNAMIC_STATE_LINE_WIDTH "
                                 "flag. This is undefined behavior and could be ignored. %s",
                                 validation_error_map[VALIDATION_ERROR_01476]);
        } else {
            skip_call |= verifyLineWidth(dev_data, DRAWSTATE_INVALID_SET, reinterpret_cast<uint64_t &>(commandBuffer), lineWidth);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetLineWidth(commandBuffer, lineWidth);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE, "vkCmdSetDepthBias()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBIASSTATE);
        pCB->status |= CBSTATUS_DEPTH_BIAS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBias(commandBuffer, depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
}

VKAPI_ATTR void VKAPI_CALL CmdSetBlendConstants(VkCommandBuffer commandBuffer, const float blendConstants[4]) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETBLENDSTATE, "vkCmdSetBlendConstants()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETBLENDSTATE);
        pCB->status |= CBSTATUS_BLEND_CONSTANTS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetBlendConstants(commandBuffer, blendConstants);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE, "vkCmdSetDepthBounds()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETDEPTHBOUNDSSTATE);
        pCB->status |= CBSTATUS_DEPTH_BOUNDS_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetDepthBounds(commandBuffer, minDepthBounds, maxDepthBounds);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilCompareMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE, "vkCmdSetStencilCompareMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREADMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_READ_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilCompareMask(commandBuffer, faceMask, compareMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilWriteMask(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE, "vkCmdSetStencilWriteMask()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILWRITEMASKSTATE);
        pCB->status |= CBSTATUS_STENCIL_WRITE_MASK_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilWriteMask(commandBuffer, faceMask, writeMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdSetStencilReference(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE, "vkCmdSetStencilReference()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETSTENCILREFERENCESTATE);
        pCB->status |= CBSTATUS_STENCIL_REFERENCE_SET;
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetStencilReference(commandBuffer, faceMask, reference);
}

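// Illustrative usage (application side, not layer code) for the vkCmdBindDescriptorSets checks below:
// binding two sets where one set contains a single dynamic uniform buffer requires exactly one entry
// in pDynamicOffsets, aligned to the device's minUniformBufferOffsetAlignment. The handles cmd_buf,
// pipeline_layout, and sets are assumed to exist.
//
//     uint32_t dynamic_offset = 256;  // assumed to be a multiple of the device's alignment limit
//     vkCmdBindDescriptorSets(cmd_buf, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0 /*firstSet*/, 2 /*setCount*/, sets, 1 /*dynamicOffsetCount*/, &dynamic_offset);
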
VKAPI_ATTR void VKAPI_CALL
CmdBindDescriptorSets(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout,
                      uint32_t firstSet, uint32_t setCount, const VkDescriptorSet *pDescriptorSets, uint32_t dynamicOffsetCount,
                      const uint32_t *pDynamicOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            // Track total count of dynamic descriptor types to make sure we have an offset for each one
            uint32_t totalDynamicDescriptors = 0;
            string errorString = "";
            uint32_t lastSetIndex = firstSet + setCount - 1;
            if (lastSetIndex >= pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size()) {
                pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                pCB->lastBound[pipelineBindPoint].dynamicOffsets.resize(lastSetIndex + 1);
            }
            auto oldFinalBoundSet = pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex];
            auto pipeline_layout = getPipelineLayout(dev_data, layout);
            for (uint32_t i = 0; i < setCount; i++) {
                cvdescriptorset::DescriptorSet *descriptor_set = getSetNode(dev_data, pDescriptorSets[i]);
                if (descriptor_set) {
                    pCB->lastBound[pipelineBindPoint].pipeline_layout = *pipeline_layout;
                    pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i + firstSet] = descriptor_set;
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_NONE, "DS", "Descriptor Set 0x%" PRIxLEAST64 " bound on pipeline %s",
                                         (uint64_t)pDescriptorSets[i], string_VkPipelineBindPoint(pipelineBindPoint));
                    if (!descriptor_set->IsUpdated() && (descriptor_set->GetTotalDescriptorCount() != 0)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             DRAWSTATE_DESCRIPTOR_SET_NOT_UPDATED, "DS",
                                             "Descriptor Set 0x%" PRIxLEAST64
                                             " bound but it was never updated. You may want to either update it or not bind it.",
                                             (uint64_t)pDescriptorSets[i]);
                    }
                    // Verify that set being bound is compatible with overlapping setLayout of pipelineLayout
                    if (!verify_set_layout_compatibility(dev_data, descriptor_set, pipeline_layout, i + firstSet, errorString)) {
                        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                             VALIDATION_ERROR_00974, "DS",
                                             "descriptorSet #%u being bound is not compatible with overlapping descriptorSetLayout "
                                             "at index %u of pipelineLayout 0x%" PRIxLEAST64 " due to: %s. %s",
                                             i, i + firstSet, reinterpret_cast<uint64_t &>(layout), errorString.c_str(),
                                             validation_error_map[VALIDATION_ERROR_00974]);
                    }

                    auto setDynamicDescriptorCount = descriptor_set->GetDynamicDescriptorCount();

                    pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i].clear();

                    if (setDynamicDescriptorCount) {
                        // First make sure we won't overstep bounds of pDynamicOffsets array
                        if ((totalDynamicDescriptors + setDynamicDescriptorCount) > dynamicOffsetCount) {
                            skip_call |=
                                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                        VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                        DRAWSTATE_INVALID_DYNAMIC_OFFSET_COUNT, "DS",
                                        "descriptorSet #%u (0x%" PRIxLEAST64
                                        ") requires %u dynamicOffsets, but only %u dynamicOffsets are left in pDynamicOffsets "
                                        "array. There must be one dynamic offset for each dynamic descriptor being bound.",
                                        i, (uint64_t)pDescriptorSets[i], descriptor_set->GetDynamicDescriptorCount(),
                                        (dynamicOffsetCount - totalDynamicDescriptors));
                        } else { // Validate and store dynamic offsets with the set
                            // Validate Dynamic Offset Minimums
                            uint32_t cur_dyn_offset = totalDynamicDescriptors;
                            for (uint32_t d = 0; d < descriptor_set->GetTotalDescriptorCount(); d++) {
                                if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minUniformBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minUniformBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                } else if (descriptor_set->GetTypeFromGlobalIndex(d) == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
                                    if (vk_safe_modulo(
                                            pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment) != 0) {
                                        skip_call |= log_msg(
                                            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                            VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, VALIDATION_ERROR_00978,
                                            "DS", "vkCmdBindDescriptorSets(): pDynamicOffsets[%d] is %d but must be a multiple of "
                                            "device limit minStorageBufferOffsetAlignment 0x%" PRIxLEAST64 ". %s",
                                            cur_dyn_offset, pDynamicOffsets[cur_dyn_offset],
                                            dev_data->phys_dev_properties.properties.limits.minStorageBufferOffsetAlignment,
                                            validation_error_map[VALIDATION_ERROR_00978]);
                                    }
                                    cur_dyn_offset++;
                                }
                            }

                            pCB->lastBound[pipelineBindPoint].dynamicOffsets[firstSet + i] =
                                std::vector<uint32_t>(pDynamicOffsets + totalDynamicDescriptors,
                                                      pDynamicOffsets + totalDynamicDescriptors + setDynamicDescriptorCount);
                            // Keep running total of dynamic descriptor count to verify at the end
                            totalDynamicDescriptors += setDynamicDescriptorCount;
                        }
                    }
                } else {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                         VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, (uint64_t)pDescriptorSets[i], __LINE__,
                                         DRAWSTATE_INVALID_SET, "DS", "Attempt to bind descriptor set 0x%" PRIxLEAST64
                                         " that doesn't exist!",
                                         (uint64_t)pDescriptorSets[i]);
                }
                skip_call |= ValidateCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS, "vkCmdBindDescriptorSets()");
                UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BINDDESCRIPTORSETS);
                // For any previously bound sets, need to set them to "invalid" if they were disturbed by this update
                if (firstSet > 0) { // Check set #s below the first bound set
                    for (uint32_t i = 0; i < firstSet; ++i) {
                        if (pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] &&
                            !verify_set_layout_compatibility(dev_data, pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i],
                                                             pipeline_layout, i, errorString)) {
                            skip_call |= log_msg(
                                dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], __LINE__, DRAWSTATE_NONE, "DS",
                                "DescriptorSet 0x%" PRIxLEAST64
                                " previously bound as set #%u was disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i], i, (uint64_t)layout);
                            pCB->lastBound[pipelineBindPoint].boundDescriptorSets[i] = VK_NULL_HANDLE;
                        }
                    }
                }
                // Check if newly last bound set invalidates any remaining bound sets
                if ((pCB->lastBound[pipelineBindPoint].boundDescriptorSets.size() - 1) > (lastSetIndex)) {
                    if (oldFinalBoundSet &&
                        !verify_set_layout_compatibility(dev_data, oldFinalBoundSet, pipeline_layout, lastSetIndex, errorString)) {
                        auto old_set = oldFinalBoundSet->GetSet();
                        skip_call |=
                            log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                    VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, reinterpret_cast<uint64_t &>(old_set), __LINE__,
                                    DRAWSTATE_NONE, "DS", "DescriptorSet 0x%" PRIxLEAST64
                                    " previously bound as set #%u is incompatible with set 0x%" PRIxLEAST64
                                    " newly bound as set #%u so set #%u and any subsequent sets were "
                                    "disturbed by newly bound pipelineLayout (0x%" PRIxLEAST64 ")",
                                    reinterpret_cast<uint64_t &>(old_set), lastSetIndex,
                                    (uint64_t)pCB->lastBound[pipelineBindPoint].boundDescriptorSets[lastSetIndex], lastSetIndex,
                                    lastSetIndex + 1, (uint64_t)layout);
                        pCB->lastBound[pipelineBindPoint].boundDescriptorSets.resize(lastSetIndex + 1);
                    }
                }
            }
            // dynamicOffsetCount must equal the total number of dynamic descriptors in the sets being bound
            if (totalDynamicDescriptors != dynamicOffsetCount) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_00975, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -06007852 "Attempting to bind %u descriptorSets with %u dynamic descriptors, but dynamicOffsetCount "
Mike Weiblen1dae96f2016-12-23 14:00:22 -07007853 "is %u. It should exactly match the number of dynamic descriptors. %s",
7854 setCount, totalDynamicDescriptors, dynamicOffsetCount, validation_error_map[VALIDATION_ERROR_00975]);
Tobin Ehlis285a8282016-03-17 13:37:40 -06007855 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007856 } else {
Tobin Ehlisfe871282016-06-28 10:28:02 -06007857 skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindDescriptorSets()");
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007858 }
7859 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06007860 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06007861 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13007862 dev_data->dispatch_table.CmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, setCount,
7863 pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07007864}
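
// Illustration (not part of the layer): on a hypothetical device where
// minUniformBufferOffsetAlignment is 0x100, the check above accepts dynamic
// offsets such as 0x0, 0x100, and 0x200, and reports VALIDATION_ERROR_00978 for
// an offset such as 0x180 (0x180 % 0x100 != 0). An app-side call that passes:
//
//     uint32_t dynamic_offsets[2] = {0x100, 0x200}; // both multiples of the limit
//     vkCmdBindDescriptorSets(cmd_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout,
//                             0, 1, &descriptor_set, 2, dynamic_offsets);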

VKAPI_ATTR void VKAPI_CALL
CmdBindIndexBuffer(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO: Need to verify somewhere that IBs have the correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto buffer_state = getBufferState(dev_data, buffer);
    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node && buffer_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindIndexBuffer()", VALIDATION_ERROR_02543);
        std::function<bool()> function = [=]() {
            return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindIndexBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER, "vkCmdBindIndexBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDINDEXBUFFER);
        VkDeviceSize offset_align = 0;
        switch (indexType) {
        case VK_INDEX_TYPE_UINT16:
            offset_align = 2;
            break;
        case VK_INDEX_TYPE_UINT32:
            offset_align = 4;
            break;
        default:
            // ParamChecker should catch a bad enum; the alignment error below also fires if offset_align stays 0
            break;
        }
        if (!offset_align || (offset % offset_align)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR, "DS",
                                 "vkCmdBindIndexBuffer() offset (0x%" PRIxLEAST64 ") does not fall on alignment (%s) boundary.",
                                 offset, string_VkIndexType(indexType));
        }
        cb_node->status |= CBSTATUS_INDEX_BUFFER_BOUND;
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindIndexBuffer(commandBuffer, buffer, offset, indexType);
}
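
// Illustration (not part of the layer): the alignment rule above makes an offset
// of 6 valid for VK_INDEX_TYPE_UINT16 (6 % 2 == 0) but flags it with
// DRAWSTATE_VTX_INDEX_ALIGNMENT_ERROR for VK_INDEX_TYPE_UINT32 (6 % 4 != 0):
//
//     vkCmdBindIndexBuffer(cmd_buffer, index_buffer, 6, VK_INDEX_TYPE_UINT16); // OK
//     vkCmdBindIndexBuffer(cmd_buffer, index_buffer, 6, VK_INDEX_TYPE_UINT32); // flagged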

void updateResourceTracking(GLOBAL_CB_NODE *pCB, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer *pBuffers) {
    uint32_t end = firstBinding + bindingCount;
    if (pCB->currentDrawData.buffers.size() < end) {
        pCB->currentDrawData.buffers.resize(end);
    }
    for (uint32_t i = 0; i < bindingCount; ++i) {
        pCB->currentDrawData.buffers[i + firstBinding] = pBuffers[i];
    }
}
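
// Worked example of the tracking above: with firstBinding = 2 and bindingCount = 3,
// currentDrawData.buffers grows to at least 5 elements and slots [2..4] receive
// pBuffers[0..2]; slots [0..1] keep whatever was bound by earlier calls.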

static inline void updateResourceTrackingOnDraw(GLOBAL_CB_NODE *pCB) { pCB->drawData.push_back(pCB->currentDrawData); }

VKAPI_ATTR void VKAPI_CALL CmdBindVertexBuffers(VkCommandBuffer commandBuffer, uint32_t firstBinding,
                                                uint32_t bindingCount, const VkBuffer *pBuffers,
                                                const VkDeviceSize *pOffsets) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    // TODO: Need to verify somewhere that VBs have the correct usage state flagged
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    if (cb_node) {
        for (uint32_t i = 0; i < bindingCount; ++i) {
            auto buffer_state = getBufferState(dev_data, pBuffers[i]);
            assert(buffer_state);
            skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, buffer_state, "vkCmdBindVertexBuffers()", VALIDATION_ERROR_02546);
            std::function<bool()> function = [=]() {
                return ValidateBufferMemoryIsValid(dev_data, buffer_state, "vkCmdBindVertexBuffers()");
            };
            cb_node->validate_functions.push_back(function);
        }
        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER, "vkCmdBindVertexBuffers()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BINDVERTEXBUFFER);
        updateResourceTracking(cb_node, firstBinding, bindingCount, pBuffers);
    } else {
        skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdBindVertexBuffers()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBindVertexBuffers(commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
}
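
// Illustration (not part of the layer): a hypothetical app-side call that the
// entry point above validates -- each buffer must have memory bound or
// VALIDATION_ERROR_02546 is reported:
//
//     VkBuffer bufs[2] = {position_buf, normal_buf}; // hypothetical handles
//     VkDeviceSize offsets[2] = {0, 256};
//     vkCmdBindVertexBuffers(cmd_buffer, 0, 2, bufs, offsets);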

// Expects global_lock to be held by caller
static void MarkStoreImagesAndBuffersAsWritten(layer_data *dev_data, GLOBAL_CB_NODE *pCB) {
    for (auto imageView : pCB->updateImages) {
        auto view_state = getImageViewState(dev_data, imageView);
        if (!view_state)
            continue;

        auto image_state = getImageState(dev_data, view_state->create_info.image);
        assert(image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
    for (auto buffer : pCB->updateBuffers) {
        auto buffer_state = getBufferState(dev_data, buffer);
        assert(buffer_state);
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, buffer_state, true);
            return false;
        };
        pCB->validate_functions.push_back(function);
    }
}

// Generic function to handle validation for all CmdDraw* type functions
static bool ValidateCmdDrawType(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                CMD_TYPE cmd_type, GLOBAL_CB_NODE **cb_state, const char *caller,
                                UNIQUE_VALIDATION_ERROR_CODE msg_code, UNIQUE_VALIDATION_ERROR_CODE const dynamic_state_msg_code) {
    bool skip = false;
    *cb_state = getCBNode(dev_data, cmd_buffer);
    if (*cb_state) {
        skip |= ValidateCmd(dev_data, *cb_state, cmd_type, caller);
        skip |= ValidateDrawState(dev_data, *cb_state, indexed, bind_point, caller, dynamic_state_msg_code);
        skip |= (VK_PIPELINE_BIND_POINT_GRAPHICS == bind_point) ? outsideRenderPass(dev_data, *cb_state, caller, msg_code)
                                                                : insideRenderPass(dev_data, *cb_state, caller, msg_code);
    }
    return skip;
}

// Generic function to handle state update for all CmdDraw* and CmdDispatch* type functions
static void UpdateStateCmdDrawDispatchType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                           CMD_TYPE cmd_type) {
    UpdateDrawState(dev_data, cb_state, bind_point);
    MarkStoreImagesAndBuffersAsWritten(dev_data, cb_state);
    UpdateCmdBufferLastCmd(dev_data, cb_state, cmd_type);
}

// Generic function to handle state update for all CmdDraw* type functions
static void UpdateStateCmdDrawType(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
                                   CMD_TYPE cmd_type, DRAW_TYPE draw_type) {
    UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, cmd_type);
    updateResourceTrackingOnDraw(cb_state);
    cb_state->drawCount[draw_type]++;
}
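
// Note on the pattern below: every CmdDraw*/CmdDispatch* entry point composes
// the helpers above in the same sequence -- take global_lock, run its
// PreCallValidate* wrapper to accumulate 'skip', drop the lock around the
// driver dispatch, then re-take it for the PostCallRecord* state update.
// CmdDraw() below is the canonical instance of the pattern.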

static bool PreCallValidateCmdDraw(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed, VkPipelineBindPoint bind_point,
                                   GLOBAL_CB_NODE **cb_state, const char *caller) {
    return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAW, cb_state, caller, VALIDATION_ERROR_01365,
                               VALIDATION_ERROR_02203);
}

static void PostCallRecordCmdDraw(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
    UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAW, DRAW);
}

VKAPI_ATTR void VKAPI_CALL CmdDraw(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount,
                                   uint32_t firstVertex, uint32_t firstInstance) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *cb_state = nullptr;
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip = PreCallValidateCmdDraw(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state, "vkCmdDraw()");
    lock.unlock();
    if (!skip) {
        dev_data->dispatch_table.CmdDraw(commandBuffer, vertexCount, instanceCount, firstVertex, firstInstance);
        lock.lock();
        PostCallRecordCmdDraw(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
        lock.unlock();
    }
}
8033
Tobin Ehlis022528b2016-12-29 12:22:32 -07008034static bool PreCallValidateCmdDrawIndexed(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8035 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008036 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXED, cb_state, caller, VALIDATION_ERROR_01372,
8037 VALIDATION_ERROR_02216);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008038}
8039
Tobin Ehlis022528b2016-12-29 12:22:32 -07008040static void PostCallRecordCmdDrawIndexed(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8041 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXED, DRAW_INDEXED);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008042}
8043
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008044VKAPI_ATTR void VKAPI_CALL CmdDrawIndexed(VkCommandBuffer commandBuffer, uint32_t indexCount,
8045 uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset,
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008046 uint32_t firstInstance) {
8047 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008048 GLOBAL_CB_NODE *cb_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008049 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008050 bool skip = PreCallValidateCmdDrawIndexed(dev_data, commandBuffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008051 "vkCmdDrawIndexed()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008052 lock.unlock();
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008053 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008054 dev_data->dispatch_table.CmdDrawIndexed(commandBuffer, indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008055 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008056 PostCallRecordCmdDrawIndexed(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS);
Tobin Ehlis3f826bf2016-12-21 11:04:13 -07008057 lock.unlock();
8058 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008059}
8060
Tobin Ehlis022528b2016-12-29 12:22:32 -07008061static bool PreCallValidateCmdDrawIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8062 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, BUFFER_STATE **buffer_state,
8063 const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008064 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDIRECT, cb_state, caller,
8065 VALIDATION_ERROR_01381, VALIDATION_ERROR_02234);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008066 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008067 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02544);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008068 return skip;
8069}
8070
Tobin Ehlis022528b2016-12-29 12:22:32 -07008071static void PostCallRecordCmdDrawIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8072 BUFFER_STATE *buffer_state) {
8073 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDIRECT, DRAW_INDIRECT);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008074 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8075}
8076
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008077VKAPI_ATTR void VKAPI_CALL
8078CmdDrawIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008079 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008080 GLOBAL_CB_NODE *cb_state = nullptr;
8081 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008082 std::unique_lock<std::mutex> lock(global_lock);
Tony Barbour0725b0d2017-01-06 11:52:50 -07008083 bool skip = PreCallValidateCmdDrawIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_GRAPHICS, &cb_state,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008084 &buffer_state, "vkCmdDrawIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008085 lock.unlock();
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008086 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008087 dev_data->dispatch_table.CmdDrawIndirect(commandBuffer, buffer, offset, count, stride);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008088 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008089 PostCallRecordCmdDrawIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
Tobin Ehlis52179ad2016-12-21 11:21:36 -07008090 lock.unlock();
8091 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008092}
8093
Tobin Ehlis022528b2016-12-29 12:22:32 -07008094static bool PreCallValidateCmdDrawIndexedIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8095 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8096 BUFFER_STATE **buffer_state, const char *caller) {
8097 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DRAWINDEXEDINDIRECT, cb_state, caller,
Jeremy Hayese2583052016-12-12 11:01:28 -07008098 VALIDATION_ERROR_01393, VALIDATION_ERROR_02272);
Tobin Ehlis46132632016-12-21 12:22:11 -07008099 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008100 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02545);
Tobin Ehlis46132632016-12-21 12:22:11 -07008101 return skip;
8102}
8103
Tobin Ehlis022528b2016-12-29 12:22:32 -07008104static void PostCallRecordCmdDrawIndexedIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8105 BUFFER_STATE *buffer_state) {
8106 UpdateStateCmdDrawType(dev_data, cb_state, bind_point, CMD_DRAWINDEXEDINDIRECT, DRAW_INDEXED_INDIRECT);
Tobin Ehlis46132632016-12-21 12:22:11 -07008107 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8108}
8109
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008110VKAPI_ATTR void VKAPI_CALL
8111CmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t count, uint32_t stride) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008112 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis46132632016-12-21 12:22:11 -07008113 GLOBAL_CB_NODE *cb_state = nullptr;
8114 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008115 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis46132632016-12-21 12:22:11 -07008116 bool skip = PreCallValidateCmdDrawIndexedIndirect(dev_data, commandBuffer, buffer, true, VK_PIPELINE_BIND_POINT_GRAPHICS,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008117 &cb_state, &buffer_state, "vkCmdDrawIndexedIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008118 lock.unlock();
Tobin Ehlis46132632016-12-21 12:22:11 -07008119 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008120 dev_data->dispatch_table.CmdDrawIndexedIndirect(commandBuffer, buffer, offset, count, stride);
Tobin Ehlis46132632016-12-21 12:22:11 -07008121 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008122 PostCallRecordCmdDrawIndexedIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_GRAPHICS, buffer_state);
Tobin Ehlis46132632016-12-21 12:22:11 -07008123 lock.unlock();
8124 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008125}
8126
Tobin Ehlis022528b2016-12-29 12:22:32 -07008127static bool PreCallValidateCmdDispatch(layer_data *dev_data, VkCommandBuffer cmd_buffer, bool indexed,
8128 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state, const char *caller) {
Jeremy Hayese2583052016-12-12 11:01:28 -07008129 return ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCH, cb_state, caller, VALIDATION_ERROR_01562,
8130 VALIDATION_ERROR_UNDEFINED);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008131}
8132
Tobin Ehlis022528b2016-12-29 12:22:32 -07008133static void PostCallRecordCmdDispatch(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point) {
8134 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCH);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008135}
8136
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008137VKAPI_ATTR void VKAPI_CALL CmdDispatch(VkCommandBuffer commandBuffer, uint32_t x, uint32_t y, uint32_t z) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008138 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008139 GLOBAL_CB_NODE *cb_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008140 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis022528b2016-12-29 12:22:32 -07008141 bool skip =
8142 PreCallValidateCmdDispatch(dev_data, commandBuffer, false, VK_PIPELINE_BIND_POINT_COMPUTE, &cb_state, "vkCmdDispatch()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008143 lock.unlock();
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008144 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008145 dev_data->dispatch_table.CmdDispatch(commandBuffer, x, y, z);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008146 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008147 PostCallRecordCmdDispatch(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE);
Tobin Ehlis2b82df82016-12-21 12:26:38 -07008148 lock.unlock();
8149 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008150}
8151
Tobin Ehlis022528b2016-12-29 12:22:32 -07008152static bool PreCallValidateCmdDispatchIndirect(layer_data *dev_data, VkCommandBuffer cmd_buffer, VkBuffer buffer, bool indexed,
8153 VkPipelineBindPoint bind_point, GLOBAL_CB_NODE **cb_state,
8154 BUFFER_STATE **buffer_state, const char *caller) {
8155 bool skip = ValidateCmdDrawType(dev_data, cmd_buffer, indexed, bind_point, CMD_DISPATCHINDIRECT, cb_state, caller,
Jeremy Hayese2583052016-12-12 11:01:28 -07008156 VALIDATION_ERROR_01569, VALIDATION_ERROR_UNDEFINED);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008157 *buffer_state = getBufferState(dev_data, buffer);
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008158 skip |= ValidateMemoryIsBoundToBuffer(dev_data, *buffer_state, caller, VALIDATION_ERROR_02547);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008159 return skip;
8160}
8161
Tobin Ehlis022528b2016-12-29 12:22:32 -07008162static void PostCallRecordCmdDispatchIndirect(layer_data *dev_data, GLOBAL_CB_NODE *cb_state, VkPipelineBindPoint bind_point,
8163 BUFFER_STATE *buffer_state) {
8164 UpdateStateCmdDrawDispatchType(dev_data, cb_state, bind_point, CMD_DISPATCHINDIRECT);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008165 AddCommandBufferBindingBuffer(dev_data, cb_state, buffer_state);
8166}
8167
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008168VKAPI_ATTR void VKAPI_CALL
8169CmdDispatchIndirect(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008170 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008171 GLOBAL_CB_NODE *cb_state = nullptr;
8172 BUFFER_STATE *buffer_state = nullptr;
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008173 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis205f0032016-12-29 11:39:10 -07008174 bool skip = PreCallValidateCmdDispatchIndirect(dev_data, commandBuffer, buffer, false, VK_PIPELINE_BIND_POINT_COMPUTE,
Tobin Ehlis022528b2016-12-29 12:22:32 -07008175 &cb_state, &buffer_state, "vkCmdDispatchIndirect()");
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008176 lock.unlock();
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008177 if (!skip) {
Chris Forbesaaa9c282016-10-03 20:01:14 +13008178 dev_data->dispatch_table.CmdDispatchIndirect(commandBuffer, buffer, offset);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008179 lock.lock();
Tobin Ehlis022528b2016-12-29 12:22:32 -07008180 PostCallRecordCmdDispatchIndirect(dev_data, cb_state, VK_PIPELINE_BIND_POINT_COMPUTE, buffer_state);
Tobin Ehlis4c54bde2016-12-21 12:37:16 -07008181 lock.unlock();
8182 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008183}
8184
Chia-I Wu629d7cd2016-05-06 11:32:54 +08008185VKAPI_ATTR void VKAPI_CALL CmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
8186 uint32_t regionCount, const VkBufferCopy *pRegions) {
Tobin Ehlisfe871282016-06-28 10:28:02 -06008187 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008188 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008189 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008190
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008191 auto cb_node = getCBNode(dev_data, commandBuffer);
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008192 auto src_buff_state = getBufferState(dev_data, srcBuffer);
8193 auto dst_buff_state = getBufferState(dev_data, dstBuffer);
8194 if (cb_node && src_buff_state && dst_buff_state) {
Tobin Ehlise1995fc2016-12-22 12:45:09 -07008195 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02531);
8196 skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyBuffer()", VALIDATION_ERROR_02532);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008197 // Update bindings between buffers and cmd buffer
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008198 AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
8199 AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008200 // Validate that SRC & DST buffers have correct usage flags set
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008201 skip_call |= ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07008202 VALIDATION_ERROR_01164, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008203 skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
Jeremy Hayesc5a60b02016-11-15 15:41:47 -07008204 VALIDATION_ERROR_01165, "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008205
8206 std::function<bool()> function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008207 return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBuffer()");
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008208 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008209 cb_node->validate_functions.push_back(function);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008210 function = [=]() {
Tobin Ehlis4668dce2016-11-16 09:30:23 -07008211 SetBufferMemoryValid(dev_data, dst_buff_state, true);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008212 return false;
8213 };
Tobin Ehlis6dd053f2016-06-24 12:31:29 -06008214 cb_node->validate_functions.push_back(function);
Tobin Ehlisd9867fc2016-05-12 16:57:14 -06008215
Tobin Ehlis1c883a02016-12-19 15:59:16 -07008216 skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
8217 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFER);
Mike Weiblen6daea5b2016-12-19 20:41:58 -07008218 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBuffer()", VALIDATION_ERROR_01172);
Tobin Ehlis3ddfa5b2016-06-28 14:04:54 -06008219 } else {
8220 // Param_checker will flag errors on invalid objects, just assert here as debugging aid
8221 assert(0);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008222 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -06008223 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -06008224 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +13008225 dev_data->dispatch_table.CmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
Tobin Ehlisc96f8062016-03-09 16:12:48 -07008226}
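
// Illustration (not part of the layer): the usage-flag checks above expect the
// app to create the buffers with matching usage bits, e.g. a hypothetical
// staging setup:
//
//     VkBufferCreateInfo src_ci = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
//     src_ci.size = 4096;
//     src_ci.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;  // satisfies VALIDATION_ERROR_01164
//     VkBufferCreateInfo dst_ci = src_ci;
//     dst_ci.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;  // satisfies VALIDATION_ERROR_01165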

static bool VerifySourceImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage srcImage,
                                    VkImageSubresourceLayers subLayers, VkImageLayout srcImageLayout,
                                    UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, srcImage, sub, node)) {
            SetLayout(cb_node, srcImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(srcImageLayout, srcImageLayout));
            continue;
        }
        if (node.layout != srcImageLayout) {
            // TODO: Improve log message in the next pass
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose source layout is %s "
                                                                        "and doesn't match the current layout %s.",
                        string_VkImageLayout(srcImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL) {
        if (srcImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            // TODO: Can we get the image node from the top of the call tree and avoid the map look-up here?
            auto image_state = getImageState(dev_data, srcImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal; flag as a perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for input image should be TRANSFER_SRC_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 msgCode, "DS", "Layout for input image is %s but can only be TRANSFER_SRC_OPTIMAL or GENERAL. %s",
                                 string_VkImageLayout(srcImageLayout), validation_error_map[msgCode]);
        }
    }
    return skip_call;
}

static bool VerifyDestImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage destImage,
                                  VkImageSubresourceLayers subLayers, VkImageLayout destImageLayout,
                                  UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip_call = false;

    for (uint32_t i = 0; i < subLayers.layerCount; ++i) {
        uint32_t layer = i + subLayers.baseArrayLayer;
        VkImageSubresource sub = {subLayers.aspectMask, subLayers.mipLevel, layer};
        IMAGE_CMD_BUF_LAYOUT_NODE node;
        if (!FindLayout(cb_node, destImage, sub, node)) {
            SetLayout(cb_node, destImage, sub, IMAGE_CMD_BUF_LAYOUT_NODE(destImageLayout, destImageLayout));
            continue;
        }
        if (node.layout != destImageLayout) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                        __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot copy from an image whose dest layout is %s and "
                                                                        "doesn't match the current layout %s.",
                        string_VkImageLayout(destImageLayout), string_VkImageLayout(node.layout));
        }
    }
    if (destImageLayout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (destImageLayout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, destImage);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal; flag as a perf warning.
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                     (VkDebugReportObjectTypeEXT)0, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                     "Layout for output image should be TRANSFER_DST_OPTIMAL instead of GENERAL.");
            }
        } else {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 msgCode, "DS", "Layout for output image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL. %s",
                                 string_VkImageLayout(destImageLayout), validation_error_map[msgCode]);
        }
    }
    return skip_call;
}

static bool VerifyClearImageLayout(layer_data *dev_data, GLOBAL_CB_NODE *cb_node, VkImage image, VkImageSubresourceRange range,
                                   VkImageLayout dest_image_layout, const char *func_name) {
    bool skip = false;

    VkImageSubresourceRange resolvedRange = range;
    ResolveRemainingLevelsLayers(dev_data, &resolvedRange, image);

    if (dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
        if (dest_image_layout == VK_IMAGE_LAYOUT_GENERAL) {
            auto image_state = getImageState(dev_data, image);
            if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
                // LAYOUT_GENERAL is allowed, but may not be performance optimal; flag as a perf warning.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0,
                                0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "%s: Layout for cleared image should be TRANSFER_DST_OPTIMAL instead of GENERAL.", func_name);
            }
        } else {
            UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01086;
            if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                error_code = VALIDATION_ERROR_01101;
            } else {
                assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
            }
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            error_code, "DS", "%s: Layout for cleared image is %s but can only be "
                                              "TRANSFER_DST_OPTIMAL or GENERAL. %s",
                            func_name, string_VkImageLayout(dest_image_layout), validation_error_map[error_code]);
        }
    }

    for (uint32_t levelIdx = 0; levelIdx < resolvedRange.levelCount; ++levelIdx) {
        uint32_t level = levelIdx + resolvedRange.baseMipLevel;
        for (uint32_t layerIdx = 0; layerIdx < resolvedRange.layerCount; ++layerIdx) {
            uint32_t layer = layerIdx + resolvedRange.baseArrayLayer;
            VkImageSubresource sub = {resolvedRange.aspectMask, level, layer};
            IMAGE_CMD_BUF_LAYOUT_NODE node;
            if (!FindLayout(cb_node, image, sub, node)) {
                SetLayout(cb_node, image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(dest_image_layout, dest_image_layout));
                continue;
            }
            if (node.layout != dest_image_layout) {
                UNIQUE_VALIDATION_ERROR_CODE error_code = VALIDATION_ERROR_01085;
                if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
                    error_code = VALIDATION_ERROR_01100;
                } else {
                    assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
                }
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0,
                            __LINE__, error_code, "DS", "%s: Cannot clear an image whose layout is %s and "
                                                        "doesn't match the current layout %s. %s",
                            func_name, string_VkImageLayout(dest_image_layout), string_VkImageLayout(node.layout),
                            validation_error_map[error_code]);
            }
        }
    }

    return skip;
}

// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
    bool result = true;
    if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
        (extent->depth != other_extent->depth)) {
        result = false;
    }
    return result;
}

// Returns the image extent of a specific subresource.
static inline VkExtent3D GetImageSubresourceExtent(const IMAGE_STATE *img, const VkImageSubresourceLayers *subresource) {
    const uint32_t mip = subresource->mipLevel;
    VkExtent3D extent = img->createInfo.extent;
    extent.width = std::max(1U, extent.width >> mip);
    extent.height = std::max(1U, extent.height >> mip);
    extent.depth = std::max(1U, extent.depth >> mip);
    return extent;
}
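
// Worked example: for a 16x8x1 image, GetImageSubresourceExtent() yields 16x8x1
// at mip 0, 8x4x1 at mip 1, and 4x2x1 at mip 2; the std::max clamp keeps every
// dimension >= 1, so mip 4 yields 1x1x1 rather than 1x0x0.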

// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentZero(const VkExtent3D *extent) {
    return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}

// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
static inline VkExtent3D GetScaledItg(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const IMAGE_STATE *img) {
    // Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
    VkExtent3D granularity = {0, 0, 0};
    auto pPool = getCommandPoolNode(dev_data, cb_node->createInfo.commandPool);
    if (pPool) {
        granularity = dev_data->phys_dev_properties.queue_family_properties[pPool->queueFamilyIndex].minImageTransferGranularity;
        if (vk_format_is_compressed(img->createInfo.format)) {
            auto block_size = vk_format_compressed_block_size(img->createInfo.format);
            granularity.width *= block_size.width;
            granularity.height *= block_size.height;
        }
    }
    return granularity;
}
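
// Worked example (assuming a queue family granularity of (1, 1, 1)): a
// VK_FORMAT_BC1_RGB_UNORM_BLOCK image uses 4x4 compressed texel blocks, so the
// scaled granularity becomes (4, 4, 1) and copies on such an image are
// validated in whole-block units.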

// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
    bool valid = true;
    if ((vk_safe_modulo(extent->depth, granularity->depth) != 0) || (vk_safe_modulo(extent->width, granularity->width) != 0) ||
        (vk_safe_modulo(extent->height, granularity->height) != 0)) {
        valid = false;
    }
    return valid;
}

// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgOffset(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkOffset3D *offset,
                                  const VkExtent3D *granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    VkExtent3D offset_extent = {};
    offset_extent.width = static_cast<uint32_t>(abs(offset->x));
    offset_extent.height = static_cast<uint32_t>(abs(offset->y));
    offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
        if (IsExtentZero(&offset_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, offset->x, offset->y, offset->z);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
        // integer multiples of the image transfer granularity.
        if (IsExtentAligned(&offset_extent, granularity) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer "
                            "multiples of this command buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
                            function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
                            granularity->depth);
        }
    }
    return skip;
}
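
// Worked example: with a scaled granularity of (4, 4, 1), an offset of
// (x=8, y=4, z=0) passes CheckItgOffset(), while (x=2, y=4, z=0) is flagged
// because 2 is not a multiple of the granularity width of 4.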

// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
static inline bool CheckItgExtent(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkExtent3D *extent,
                                  const VkOffset3D *offset, const VkExtent3D *granularity, const VkExtent3D *subresource_extent,
                                  const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (IsExtentZero(granularity)) {
        // If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
        // subresource extent.
        if (IsExtentEqual(extent, subresource_extent) == false) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                            "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
                            "when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
                            function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
                            subresource_extent->height, subresource_extent->depth);
        }
    } else {
        // If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
        // integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
        // subresource extent dimensions.
        VkExtent3D offset_extent_sum = {};
        offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
        offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
        offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
        if ((IsExtentAligned(extent, granularity) == false) && (IsExtentEqual(&offset_extent_sum, subresource_extent) == false)) {
            skip |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command buffer's "
                        "queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
                        "extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
                        function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
                        granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
                        subresource_extent->width, subresource_extent->height, subresource_extent->depth);
        }
    }
    return skip;
}
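
// Worked example: with granularity (4, 4, 1) and a 16x16x1 subresource, an
// 8x8x1 extent at offset (0, 0, 0) passes (all dimensions block-aligned), and a
// 6x6x1 extent at offset (10, 10, 0) also passes because offset + extent equals
// the subresource extent; a 6x6x1 extent at offset (0, 0, 0) is flagged.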

// Check a uint32_t width or stride value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgInt(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const uint32_t value,
                               const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%d) must be an even integer multiple of this command buffer's queue family image "
                        "transfer granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}

// Check a VkDeviceSize value against a queue family's Image Transfer Granularity width value
static inline bool CheckItgSize(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node, const VkDeviceSize value,
                                const uint32_t granularity, const uint32_t i, const char *function, const char *member) {
    bool skip = false;
    if (vk_safe_modulo(value, granularity) != 0) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_IMAGE_TRANSFER_GRANULARITY, "DS",
                        "%s: pRegion[%d].%s (%" PRIdLEAST64
                        ") must be an even integer multiple of this command buffer's queue family image transfer "
                        "granularity width (%d).",
                        function, i, member, value, granularity);
    }
    return skip;
}
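
// Worked example: with a scaled granularity width of 4, a bufferOffset of 16
// passes CheckItgSize() while a bufferOffset of 6 is flagged, since 6 is not an
// integer multiple of 4.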

// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy structure
static inline bool ValidateCopyImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                    const IMAGE_STATE *img, const VkImageCopy *region,
                                                                    const uint32_t i, const char *function) {
    bool skip = false;
    VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
    skip |= CheckItgOffset(dev_data, cb_node, &region->srcOffset, &granularity, i, function, "srcOffset");
    skip |= CheckItgOffset(dev_data, cb_node, &region->dstOffset, &granularity, i, function, "dstOffset");
    VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->dstSubresource);
    skip |= CheckItgExtent(dev_data, cb_node, &region->extent, &region->dstOffset, &granularity, &subresource_extent, i, function,
                           "extent");
    return skip;
}

// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy structure
static inline bool ValidateCopyBufferImageTransferGranularityRequirements(layer_data *dev_data, const GLOBAL_CB_NODE *cb_node,
                                                                          const IMAGE_STATE *img, const VkBufferImageCopy *region,
                                                                          const uint32_t i, const char *function) {
    bool skip = false;
    if (vk_format_is_compressed(img->createInfo.format) == true) {
        // TODO: Add granularity checking for compressed formats

        // bufferRowLength must be a multiple of the compressed texel block width
        // bufferImageHeight must be a multiple of the compressed texel block height
        // all members of imageOffset must be a multiple of the corresponding dimensions of the compressed texel block
        // bufferOffset must be a multiple of the compressed texel block size in bytes
        // imageExtent.width must be a multiple of the compressed texel block width or (imageExtent.width + imageOffset.x)
        //     must equal the image subresource width
        // imageExtent.height must be a multiple of the compressed texel block height or (imageExtent.height + imageOffset.y)
        //     must equal the image subresource height
        // imageExtent.depth must be a multiple of the compressed texel block depth or (imageExtent.depth + imageOffset.z)
        //     must equal the image subresource depth
    } else {
        VkExtent3D granularity = GetScaledItg(dev_data, cb_node, img);
        skip |= CheckItgSize(dev_data, cb_node, region->bufferOffset, granularity.width, i, function, "bufferOffset");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferRowLength, granularity.width, i, function, "bufferRowLength");
        skip |= CheckItgInt(dev_data, cb_node, region->bufferImageHeight, granularity.width, i, function, "bufferImageHeight");
        skip |= CheckItgOffset(dev_data, cb_node, &region->imageOffset, &granularity, i, function, "imageOffset");
        VkExtent3D subresource_extent = GetImageSubresourceExtent(img, &region->imageSubresource);
        skip |= CheckItgExtent(dev_data, cb_node, &region->imageExtent, &region->imageOffset, &granularity, &subresource_extent, i,
                               function, "imageExtent");
    }
    return skip;
}
8563
VKAPI_ATTR void VKAPI_CALL
CmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02533);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyImage()", VALIDATION_ERROR_02534);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01178, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01181, "vkCmdCopyImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGE, "vkCmdCopyImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImage()", VALIDATION_ERROR_01194);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].srcSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01180);
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].dstSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01183);
            skip_call |= ValidateCopyImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i], i,
                                                                          "vkCmdCopyImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions);
}

// Validate that an image's sampleCount matches the requirement for a specific API call
static inline bool ValidateImageSampleCount(layer_data *dev_data, IMAGE_STATE *image_state, VkSampleCountFlagBits sample_count,
                                            const char *location, UNIQUE_VALIDATION_ERROR_CODE msgCode) {
    bool skip = false;
    if (image_state->createInfo.samples != sample_count) {
        skip =
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                    reinterpret_cast<uint64_t &>(image_state->image), 0, msgCode, "DS",
                    "%s for image 0x%" PRIxLEAST64 " was created with a sample count of %s but must be %s. %s", location,
                    reinterpret_cast<uint64_t &>(image_state->image), string_VkSampleCountFlagBits(image_state->createInfo.samples),
                    string_VkSampleCountFlagBits(sample_count), validation_error_map[msgCode]);
    }
    return skip;
}

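// vkCmdBlitImage() adds one requirement beyond the plain copy path: both images must have been created with
// VK_SAMPLE_COUNT_1_BIT, which ValidateImageSampleCount() above enforces via VALIDATION_ERROR_02194/02195.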
VKAPI_ATTR void VKAPI_CALL
CmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
             VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit *pRegions, VkFilter filter) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): srcImage",
                                              VALIDATION_ERROR_02194);
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT, "vkCmdBlitImage(): dstImage",
                                              VALIDATION_ERROR_02195);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02539);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdBlitImage()", VALIDATION_ERROR_02540);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        // Validate that SRC & DST images have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_02182, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_02186, "vkCmdBlitImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdBlitImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_BLITIMAGE, "vkCmdBlitImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BLITIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBlitImage()", VALIDATION_ERROR_01300);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                              pRegions, filter);
}

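// For buffer-to-image copies the usage-flag pairing flips: the source buffer needs VK_BUFFER_USAGE_TRANSFER_SRC_BIT
// while the destination image needs VK_IMAGE_USAGE_TRANSFER_DST_BIT. Each region also goes through the
// VkBufferImageCopy granularity checks (CheckItg*), which by their naming appear to test against the queue family's
// minImageTransferGranularity.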
VKAPI_ATTR void VKAPI_CALL CmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer,
                                                VkImage dstImage, VkImageLayout dstImageLayout,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_buff_state = getBufferState(dev_data, srcBuffer);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_buff_state && dst_image_state) {
        skip_call |= ValidateImageSampleCount(dev_data, dst_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyBufferToImage(): dstImage", VALIDATION_ERROR_01232);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buff_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02535);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_02536);
        AddCommandBufferBindingBuffer(dev_data, cb_node, src_buff_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        skip_call |=
            ValidateBufferUsageFlags(dev_data, src_buff_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, VALIDATION_ERROR_01230,
                                     "vkCmdCopyBufferToImage()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
        skip_call |= ValidateImageUsageFlags(dev_data, dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true,
                                             VALIDATION_ERROR_01231, "vkCmdCopyBufferToImage()", "VK_IMAGE_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() { return ValidateBufferMemoryIsValid(dev_data, src_buff_state, "vkCmdCopyBufferToImage()"); };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE, "vkCmdCopyBufferToImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYBUFFERTOIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyBufferToImage()", VALIDATION_ERROR_01242);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifyDestImageLayout(dev_data, cb_node, dstImage, pRegions[i].imageSubresource, dstImageLayout,
                                               VALIDATION_ERROR_01234);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, dst_image_state, &pRegions[i],
                                                                                i, "vkCmdCopyBufferToImage()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
}

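// vkCmdCopyImageToBuffer() mirrors the previous intercept with the source and destination roles swapped: the
// deferred validate_functions check the source image's contents at submit time and mark the destination buffer
// contents valid.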
VKAPI_ATTR void VKAPI_CALL CmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage,
                                                VkImageLayout srcImageLayout, VkBuffer dstBuffer,
                                                uint32_t regionCount, const VkBufferImageCopy *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && src_image_state && dst_buff_state) {
        skip_call |= ValidateImageSampleCount(dev_data, src_image_state, VK_SAMPLE_COUNT_1_BIT,
                                              "vkCmdCopyImageToBuffer(): srcImage", VALIDATION_ERROR_01249);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02537);
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_02538);
        // Update bindings between buffer/image and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that SRC image & DST buffer have correct usage flags set
        skip_call |= ValidateImageUsageFlags(dev_data, src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true,
                                             VALIDATION_ERROR_01248, "vkCmdCopyImageToBuffer()", "VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
        skip_call |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01252,
                                     "vkCmdCopyImageToBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdCopyImageToBuffer()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER, "vkCmdCopyImageToBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYIMAGETOBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyImageToBuffer()", VALIDATION_ERROR_01260);
        for (uint32_t i = 0; i < regionCount; ++i) {
            skip_call |= VerifySourceImageLayout(dev_data, cb_node, srcImage, pRegions[i].imageSubresource, srcImageLayout,
                                                 VALIDATION_ERROR_01251);
            skip_call |= ValidateCopyBufferImageTransferGranularityRequirements(dev_data, cb_node, src_image_state, &pRegions[i],
                                                                                i, "vkCmdCopyImageToBuffer()");
        }
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
}

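// vkCmdUpdateBuffer() and vkCmdFillBuffer() below only write a destination buffer, so their intercepts validate
// TRANSFER_DST usage and bound memory, then queue a lambda that marks the buffer contents valid at submit time.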
VKAPI_ATTR void VKAPI_CALL CmdUpdateBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer,
                                           VkDeviceSize dstOffset, VkDeviceSize dataSize, const uint32_t *pData) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdUpdateBuffer()", VALIDATION_ERROR_02530);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01146, "vkCmdUpdateBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_UPDATEBUFFER, "vkCmdUpdateBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_UPDATEBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdUpdateBuffer()", VALIDATION_ERROR_01155);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, dataSize, pData);
}

VKAPI_ATTR void VKAPI_CALL
CmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdFillBuffer()", VALIDATION_ERROR_02529);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |= ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                              VALIDATION_ERROR_01137, "vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_FILLBUFFER);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdFillBuffer()", VALIDATION_ERROR_01142);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdFillBuffer(commandBuffer, dstBuffer, dstOffset, size, data);
}

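// vkCmdClearAttachments() is only legal inside a render pass, so this intercept flips the usual check to
// outsideRenderPass(). It also warns when a full-render-area clear is recorded before any draw (LOAD_OP_CLEAR is
// normally the better choice) and verifies each requested attachment is actually referenced by the active subpass.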
VKAPI_ATTR void VKAPI_CALL CmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
                                               const VkClearAttachment *pAttachments, uint32_t rectCount,
                                               const VkClearRect *pRects) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_CLEARATTACHMENTS);
        // Warn if this is issued prior to Draw Cmd and clearing the entire attachment
        if (!hasDrawCmd(pCB) && (pCB->activeRenderPassBeginInfo.renderArea.extent.width == pRects[0].rect.extent.width) &&
            (pCB->activeRenderPassBeginInfo.renderArea.extent.height == pRects[0].rect.extent.height)) {
            // There are times where an app needs to use ClearAttachments (generally when reusing a buffer inside of a
            // render pass). Can we make this warning more specific? It would be good to avoid triggering this test when
            // we can tell it's a use that must call CmdClearAttachments; otherwise this seems more like a performance
            // warning.
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t &>(commandBuffer), 0,
                                 DRAWSTATE_CLEAR_CMD_BEFORE_DRAW, "DS",
                                 "vkCmdClearAttachments() issued on command buffer object 0x%p prior to any Draw Cmds."
                                 " It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw.",
                                 commandBuffer);
        }
        skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdClearAttachments()", VALIDATION_ERROR_01122);
    }

    // Validate that attachment is in reference list of active subpass
    if (pCB && pCB->activeRenderPass) {
        const VkRenderPassCreateInfo *pRPCI = pCB->activeRenderPass->createInfo.ptr();
        const VkSubpassDescription *pSD = &pRPCI->pSubpasses[pCB->activeSubpass];
        auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);

        for (uint32_t i = 0; i < attachmentCount; i++) {
            auto clear_desc = &pAttachments[i];
            VkImageView image_view = VK_NULL_HANDLE;

            if (clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
                if (clear_desc->colorAttachment >= pSD->colorAttachmentCount) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        (uint64_t)commandBuffer, __LINE__, VALIDATION_ERROR_01114, "DS",
                        "vkCmdClearAttachments() color attachment index %d out of range for active subpass %d. %s",
                        clear_desc->colorAttachment, pCB->activeSubpass, validation_error_map[VALIDATION_ERROR_01114]);
                } else if (pSD->pColorAttachments[clear_desc->colorAttachment].attachment == VK_ATTACHMENT_UNUSED) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                        DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() color attachment index %d is VK_ATTACHMENT_UNUSED; ignored.",
                        clear_desc->colorAttachment);
                } else {
                    image_view =
                        framebuffer->createInfo.pAttachments[pSD->pColorAttachments[clear_desc->colorAttachment].attachment];
                }
            } else if (clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
                if (!pSD->pDepthStencilAttachment || // Says no DS will be used in active subpass
                    (pSD->pDepthStencilAttachment->attachment ==
                     VK_ATTACHMENT_UNUSED)) { // Says no DS will be used in active subpass

                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                        VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)commandBuffer, __LINE__,
                        DRAWSTATE_MISSING_ATTACHMENT_REFERENCE, "DS",
                        "vkCmdClearAttachments() depth/stencil clear with no depth/stencil attachment in subpass; ignored");
                } else {
                    image_view = framebuffer->createInfo.pAttachments[pSD->pDepthStencilAttachment->attachment];
                }
            }

            if (image_view) {
                auto image_view_state = getImageViewState(dev_data, image_view);
                auto aspects_present = image_view_state->create_info.subresourceRange.aspectMask;
                auto extra_aspects = clear_desc->aspectMask & ~aspects_present;
                // TODO: This is a different check than 01125. Need a new valid usage statement for this case, or should kill check.
                if (extra_aspects) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT,
                        reinterpret_cast<uint64_t &>(image_view), __LINE__, VALIDATION_ERROR_01125, "DS",
                        "vkCmdClearAttachments() with aspects not present in image view: %s. %s",
                        string_VkImageAspectFlagBits((VkImageAspectFlagBits)extra_aspects),
                        validation_error_map[VALIDATION_ERROR_01125]);
                }
            }
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearAttachments(commandBuffer, attachmentCount, pAttachments, rectCount, pRects);
}

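// The two image-clear intercepts below (color and depth/stencil) share one shape: the clear must happen outside a
// render pass, the image must have memory bound, and every VkImageSubresourceRange is checked against the supplied
// imageLayout via VerifyClearImageLayout().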
VKAPI_ATTR void VKAPI_CALL CmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image,
                                              VkImageLayout imageLayout, const VkClearColorValue *pColor,
                                              uint32_t rangeCount, const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearColorImage()", VALIDATION_ERROR_02527);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARCOLORIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearColorImage()", VALIDATION_ERROR_01096);
    } else {
        assert(0);
    }
    for (uint32_t i = 0; i < rangeCount; ++i) {
        skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearColorImage()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
}

VKAPI_ATTR void VKAPI_CALL
CmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
                          const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
                          const VkImageSubresourceRange *pRanges) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto image_state = getImageState(dev_data, image);
    if (cb_node && image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, image_state, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_02528);
        AddCommandBufferBindingImage(dev_data, cb_node, image_state);
        std::function<bool()> function = [=]() {
            SetImageMemoryValid(dev_data, image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_CLEARDEPTHSTENCILIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdClearDepthStencilImage()", VALIDATION_ERROR_01111);
    } else {
        assert(0);
    }
    for (uint32_t i = 0; i < rangeCount; ++i) {
        skip_call |= VerifyClearImageLayout(dev_data, cb_node, image, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
}

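// vkCmdResolveImage() reuses the copy-style bookkeeping: the multisample source is validated at submit time and the
// resolve destination is marked valid once the command executes.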
VKAPI_ATTR void VKAPI_CALL
CmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
                VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve *pRegions) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto src_image_state = getImageState(dev_data, srcImage);
    auto dst_image_state = getImageState(dev_data, dstImage);
    if (cb_node && src_image_state && dst_image_state) {
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, src_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02541);
        skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image_state, "vkCmdResolveImage()", VALIDATION_ERROR_02542);
        // Update bindings between images and cmd buffer
        AddCommandBufferBindingImage(dev_data, cb_node, src_image_state);
        AddCommandBufferBindingImage(dev_data, cb_node, dst_image_state);
        std::function<bool()> function = [=]() {
            return ValidateImageMemoryIsValid(dev_data, src_image_state, "vkCmdResolveImage()");
        };
        cb_node->validate_functions.push_back(function);
        function = [=]() {
            SetImageMemoryValid(dev_data, dst_image_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);

        skip_call |= ValidateCmd(dev_data, cb_node, CMD_RESOLVEIMAGE, "vkCmdResolveImage()");
        UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_RESOLVEIMAGE);
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdResolveImage()", VALIDATION_ERROR_01335);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
                                                 pRegions);
}

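// Helper queued onto pCB->eventUpdates by vkCmdSetEvent()/vkCmdResetEvent() below; it runs at queue-submit time to
// record the stage mask an event was last signaled with, both per command buffer and per queue. It returns false
// because recording the mask can never itself fail validation.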
bool setEventStageMask(VkQueue queue, VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->eventToStageMap[event] = stageMask;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.eventToStageMap[event] = stageMask;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdSetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_SETEVENT, "vkCmdSetEvent()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_SETEVENT);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdSetEvent()", VALIDATION_ERROR_00238);
        auto event_state = getEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, stageMask);
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdSetEvent(commandBuffer, event, stageMask);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetEvent(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETEVENT, "vkCmdResetEvent()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETEVENT);
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetEvent()", VALIDATION_ERROR_00249);
        auto event_state = getEventNode(dev_data, event);
        if (event_state) {
            addCommandBufferBinding(&event_state->cb_bindings,
                                    {reinterpret_cast<uint64_t &>(event), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT}, pCB);
            event_state->cb_bindings.insert(pCB);
        }
        pCB->events.push_back(event);
        if (!pCB->waitedEvents.count(event)) {
            pCB->writeEventsBeforeWait.push_back(event);
        }
        std::function<bool(VkQueue)> eventUpdate =
            std::bind(setEventStageMask, std::placeholders::_1, commandBuffer, event, VkPipelineStageFlags(0));
        pCB->eventUpdates.push_back(eventUpdate);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetEvent(commandBuffer, event, stageMask);
}

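// Track the layout transition of a single (aspect, mip level, array layer) tuple for one image barrier. The first
// time a subresource is seen in this command buffer its old/new layouts are simply recorded; on later barriers the
// tracked layout must match the barrier's oldLayout, except when oldLayout is UNDEFINED (which discards contents).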
static bool TransitionImageAspectLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkImageMemoryBarrier *mem_barrier,
                                        uint32_t level, uint32_t layer, VkImageAspectFlags aspect) {
    if (!(mem_barrier->subresourceRange.aspectMask & aspect)) {
        return false;
    }
    VkImageSubresource sub = {aspect, level, layer};
    IMAGE_CMD_BUF_LAYOUT_NODE node;
    if (!FindLayout(pCB, mem_barrier->image, sub, node)) {
        SetLayout(pCB, mem_barrier->image, sub, IMAGE_CMD_BUF_LAYOUT_NODE(mem_barrier->oldLayout, mem_barrier->newLayout));
        return false;
    }
    bool skip = false;
    if (mem_barrier->oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
        // TODO: Set memory invalid which is in mem_tracker currently
    } else if (node.layout != mem_barrier->oldLayout) {
        skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                        "You cannot transition the layout of aspect %d from %s when current layout is %s.", aspect,
                        string_VkImageLayout(mem_barrier->oldLayout), string_VkImageLayout(node.layout));
    }
    SetLayout(pCB, mem_barrier->image, sub, mem_barrier->newLayout);
    return skip;
}

// TODO: Separate validation and layout state updates
static bool TransitionImageLayouts(VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                                   const VkImageMemoryBarrier *pImgMemBarriers) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    bool skip = false;
    uint32_t levelCount = 0;
    uint32_t layerCount = 0;

    for (uint32_t i = 0; i < memBarrierCount; ++i) {
        auto mem_barrier = &pImgMemBarriers[i];
        if (!mem_barrier)
            continue;
        // TODO: Do not iterate over every possibility - consolidate where possible
        ResolveRemainingLevelsLayers(dev_data, &levelCount, &layerCount, mem_barrier->subresourceRange, mem_barrier->image);

        for (uint32_t j = 0; j < levelCount; j++) {
            uint32_t level = mem_barrier->subresourceRange.baseMipLevel + j;
            for (uint32_t k = 0; k < layerCount; k++) {
                uint32_t layer = mem_barrier->subresourceRange.baseArrayLayer + k;
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_COLOR_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_DEPTH_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_STENCIL_BIT);
                skip |= TransitionImageAspectLayout(dev_data, pCB, mem_barrier, level, layer, VK_IMAGE_ASPECT_METADATA_BIT);
            }
        }
    }
    return skip;
}

// Print readable FlagBits in FlagMask
static std::string string_VkAccessFlags(VkAccessFlags accessMask) {
    std::string result;
    std::string separator;

    if (accessMask == 0) {
        result = "[None]";
    } else {
        result = "[";
        for (auto i = 0; i < 32; i++) {
            if (accessMask & (1 << i)) {
                result = result + separator + string_VkAccessFlagBits((VkAccessFlagBits)(1 << i));
                separator = " | ";
            }
        }
        result = result + "]";
    }
    return result;
}

// AccessFlags MUST have 'required_bit' set, and may have one or more of 'optional_bits' set.
// If required_bit is zero, accessMask must have at least one of 'optional_bits' set
// TODO: Add tracking to ensure that at least one barrier has been set for these layout transitions
static bool ValidateMaskBits(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                             const VkImageLayout &layout, VkAccessFlags required_bit, VkAccessFlags optional_bits,
                             const char *type) {
    bool skip_call = false;

    if ((accessMask & required_bit) || (!required_bit && (accessMask & optional_bits))) {
        if (accessMask & ~(required_bit | optional_bits)) {
            // TODO: Verify against Valid Use
            skip_call |=
                log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_BARRIER, "DS",
                        "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", type, accessMask,
                        string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
        }
    } else {
        if (!required_bit) {
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must contain at least one of access bits %d %s when layout is %s, unless "
                                 "the app has previously added a barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), optional_bits,
                                 string_VkAccessFlags(optional_bits).c_str(), string_VkImageLayout(layout));
        } else {
            std::string opt_bits;
            if (optional_bits != 0) {
                std::stringstream ss;
                ss << optional_bits;
                opt_bits = "and may have optional bits " + ss.str() + ' ' + string_VkAccessFlags(optional_bits);
            }
            skip_call |= log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_BARRIER, "DS",
                                 "%s AccessMask %d %s must have required access bit %d %s %s when layout is %s, unless the app "
                                 "has previously added a barrier for this transition.",
                                 type, accessMask, string_VkAccessFlags(accessMask).c_str(), required_bit,
                                 string_VkAccessFlags(required_bit).c_str(), opt_bits.c_str(), string_VkImageLayout(layout));
        }
    }
    return skip_call;
}

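// Map each well-known image layout to the access-mask bits a barrier transitioning to or from it is expected to
// carry, deferring the actual check to ValidateMaskBits() above. GENERAL and any unrecognized layout fall through
// unchecked.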
static bool ValidateMaskBitsFromLayouts(const layer_data *my_data, VkCommandBuffer cmdBuffer, const VkAccessFlags &accessMask,
                                        const VkImageLayout &layout, const char *type) {
    bool skip_call = false;
    switch (layout) {
        case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                                          VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
            break;
        }
        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT, type);
            break;
        }
        case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_WRITE_BIT, 0, type);
            break;
        }
        case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                          VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT |
                                              VK_ACCESS_INPUT_ATTACHMENT_READ_BIT,
                                          type);
            break;
        }
        case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, 0,
                                          VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, type);
            break;
        }
        case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_TRANSFER_READ_BIT, 0, type);
            break;
        }
        case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR: {
            skip_call |= ValidateMaskBits(my_data, cmdBuffer, accessMask, layout, VK_ACCESS_MEMORY_READ_BIT, 0, type);
            break;
        }
        case VK_IMAGE_LAYOUT_UNDEFINED: {
            if (accessMask != 0) {
                // TODO: Verify against Valid Use section spec
                skip_call |=
                    log_msg(my_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS",
                            "Additional bits in %s accessMask 0x%X %s are specified when layout is %s.", type, accessMask,
                            string_VkAccessFlags(accessMask).c_str(), string_VkImageLayout(layout));
            }
            break;
        }
        case VK_IMAGE_LAYOUT_GENERAL:
        default: { break; }
    }
    return skip_call;
}

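// Cross-check the barrier arrays passed to vkCmdPipelineBarrier()/vkCmdWaitEvents(): barriers inside a render pass
// require a subpass self-dependency, image barriers must follow the sharing-mode/queue-family-index rules and may
// not transition to UNDEFINED or PREINITIALIZED, and src/dst access masks are checked against the declared layout
// transition.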
static bool ValidateBarriers(const char *funcName, VkCommandBuffer cmdBuffer, uint32_t memBarrierCount,
                             const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
                             const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
                             const VkImageMemoryBarrier *pImageMemBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(cmdBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, cmdBuffer);
    if (pCB->activeRenderPass && memBarrierCount) {
        if (!pCB->activeRenderPass->hasSelfDependency[pCB->activeSubpass]) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Barriers cannot be set during subpass %d "
                                                             "with no self-dependency specified.",
                            funcName, pCB->activeSubpass);
        }
    }
    for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
        auto mem_barrier = &pImageMemBarriers[i];
        auto image_data = getImageState(dev_data, mem_barrier->image);
        if (image_data) {
            uint32_t src_q_f_index = mem_barrier->srcQueueFamilyIndex;
            uint32_t dst_q_f_index = mem_barrier->dstQueueFamilyIndex;
            if (image_data->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT) {
                // srcQueueFamilyIndex and dstQueueFamilyIndex must both
                // be VK_QUEUE_FAMILY_IGNORED
                if ((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image Barrier for image 0x%" PRIx64 " was created with sharingMode of "
                                    "VK_SHARING_MODE_CONCURRENT. Src and dst "
                                    "queueFamilyIndices must be VK_QUEUE_FAMILY_IGNORED.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                }
            } else {
                // Sharing mode is VK_SHARING_MODE_EXCLUSIVE. srcQueueFamilyIndex and
                // dstQueueFamilyIndex must either both be VK_QUEUE_FAMILY_IGNORED,
                // or both be a valid queue family
                if (((src_q_f_index == VK_QUEUE_FAMILY_IGNORED) || (dst_q_f_index == VK_QUEUE_FAMILY_IGNORED)) &&
                    (src_q_f_index != dst_q_f_index)) {
                    skip |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_QUEUE_INDEX, "DS", "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                                                     "of VK_SHARING_MODE_EXCLUSIVE. If one of src- or "
                                                                     "dstQueueFamilyIndex is VK_QUEUE_FAMILY_IGNORED, both "
                                                                     "must be.",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image));
                } else if (((src_q_f_index != VK_QUEUE_FAMILY_IGNORED) && (dst_q_f_index != VK_QUEUE_FAMILY_IGNORED)) &&
                           ((src_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
                            (dst_q_f_index >= dev_data->phys_dev_properties.queue_family_properties.size()))) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                                    "%s: Image 0x%" PRIx64 " was created with sharingMode "
                                    "of VK_SHARING_MODE_EXCLUSIVE, but srcQueueFamilyIndex %d"
                                    " or dstQueueFamilyIndex %d is not less than the " PRINTF_SIZE_T_SPECIFIER
                                    " queueFamilies created for this device.",
                                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->image), src_q_f_index, dst_q_f_index,
                                    dev_data->phys_dev_properties.queue_family_properties.size());
                }
            }
        }

        if (mem_barrier) {
            if (mem_barrier->oldLayout != mem_barrier->newLayout) {
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->srcAccessMask, mem_barrier->oldLayout, "Source");
                skip |=
                    ValidateMaskBitsFromLayouts(dev_data, cmdBuffer, mem_barrier->dstAccessMask, mem_barrier->newLayout, "Dest");
            }
            if (mem_barrier->newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier->newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Image Layout cannot be transitioned to UNDEFINED or "
                                                                 "PREINITIALIZED.",
                                funcName);
            }
            auto image_data = getImageState(dev_data, mem_barrier->image);
            VkFormat format = VK_FORMAT_UNDEFINED;
            uint32_t arrayLayers = 0, mipLevels = 0;
            bool imageFound = false;
            if (image_data) {
                format = image_data->createInfo.format;
                arrayLayers = image_data->createInfo.arrayLayers;
                mipLevels = image_data->createInfo.mipLevels;
                imageFound = true;
            } else if (dev_data->device_extensions.wsi_enabled) {
                auto imageswap_data = getSwapchainFromImage(dev_data, mem_barrier->image);
                if (imageswap_data) {
                    auto swapchain_data = getSwapchainNode(dev_data, imageswap_data);
                    if (swapchain_data) {
                        format = swapchain_data->createInfo.imageFormat;
                        arrayLayers = swapchain_data->createInfo.imageArrayLayers;
                        mipLevels = 1;
                        imageFound = true;
                    }
                }
            }
            if (imageFound) {
                auto aspect_mask = mem_barrier->subresourceRange.aspectMask;
                // Use the barrier's image handle here; image_data may be null when the image belongs to a swapchain
                skip |= ValidateImageAspectMask(dev_data, mem_barrier->image, format, aspect_mask, funcName);
                int layerCount = (mem_barrier->subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS)
                                     ? 1
                                     : mem_barrier->subresourceRange.layerCount;
                if ((mem_barrier->subresourceRange.baseArrayLayer + layerCount) > arrayLayers) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: Subresource must have the sum of the "
                                    "baseArrayLayer (%d) and layerCount (%d) be less "
                                    "than or equal to the total number of layers (%d).",
                                    funcName, mem_barrier->subresourceRange.baseArrayLayer, layerCount, arrayLayers);
                }
                int levelCount = (mem_barrier->subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS)
                                     ? 1
                                     : mem_barrier->subresourceRange.levelCount;
                if ((mem_barrier->subresourceRange.baseMipLevel + levelCount) > mipLevels) {
                    skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                    __LINE__, DRAWSTATE_INVALID_BARRIER, "DS",
                                    "%s: Subresource must have the sum of the baseMipLevel "
                                    "(%d) and levelCount (%d) be less than or equal to "
                                    "the total number of levels (%d).",
                                    funcName, mem_barrier->subresourceRange.baseMipLevel, levelCount, mipLevels);
                }
            }
        }
    }
    for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
        auto mem_barrier = &pBufferMemBarriers[i];
        if (pCB->activeRenderPass) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barriers cannot be used during a render pass.", funcName);
        }
        if (!mem_barrier)
            continue;

        // Validate buffer barrier queue family indices
        if ((mem_barrier->srcQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->srcQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size()) ||
            (mem_barrier->dstQueueFamilyIndex != VK_QUEUE_FAMILY_IGNORED &&
             mem_barrier->dstQueueFamilyIndex >= dev_data->phys_dev_properties.queue_family_properties.size())) {
            skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_QUEUE_INDEX, "DS",
                            "%s: Buffer Barrier 0x%" PRIx64 " has a QueueFamilyIndex that is not less "
                            "than the number of QueueFamilies (" PRINTF_SIZE_T_SPECIFIER ") for this device.",
                            funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                            dev_data->phys_dev_properties.queue_family_properties.size());
        }

        auto buffer_state = getBufferState(dev_data, mem_barrier->buffer);
        if (buffer_state) {
            auto buffer_size = buffer_state->requirements.size;
            if (mem_barrier->offset >= buffer_size) {
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64
                                                                 " which is not less than total size 0x%" PRIx64 ".",
                                funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                                reinterpret_cast<const uint64_t &>(mem_barrier->offset),
                                reinterpret_cast<const uint64_t &>(buffer_size));
            } else if (mem_barrier->size != VK_WHOLE_SIZE && (mem_barrier->offset + mem_barrier->size > buffer_size)) {
                skip |= log_msg(
                    dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    DRAWSTATE_INVALID_BARRIER, "DS", "%s: Buffer Barrier 0x%" PRIx64 " has offset 0x%" PRIx64 " and size 0x%" PRIx64
                                                     " whose sum is greater than total size 0x%" PRIx64 ".",
                    funcName, reinterpret_cast<const uint64_t &>(mem_barrier->buffer),
                    reinterpret_cast<const uint64_t &>(mem_barrier->offset), reinterpret_cast<const uint64_t &>(mem_barrier->size),
                    reinterpret_cast<const uint64_t &>(buffer_size));
            }
        }
    }
    return skip;
}

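// Deferred check run at queue-submit time for vkCmdWaitEvents: srcStageMask must equal the bitwise OR
// of the stageMask values used to set the awaited events (optionally with VK_PIPELINE_STAGE_HOST_BIT
// for host-set events). Deferred because events may be set on the host or in other command buffers
// after this command buffer is recorded.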
bool validateEventStageMask(VkQueue queue, GLOBAL_CB_NODE *pCB, uint32_t eventCount, size_t firstEventIndex,
                            VkPipelineStageFlags sourceStageMask) {
    bool skip_call = false;
    VkPipelineStageFlags stageMask = 0;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
    for (uint32_t i = 0; i < eventCount; ++i) {
        auto event = pCB->events[firstEventIndex + i];
        auto queue_data = dev_data->queueMap.find(queue);
        if (queue_data == dev_data->queueMap.end())
            return false;
        auto event_data = queue_data->second.eventToStageMap.find(event);
        if (event_data != queue_data->second.eventToStageMap.end()) {
            stageMask |= event_data->second;
        } else {
            auto global_event_data = getEventNode(dev_data, event);
            if (!global_event_data) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
                                     reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_INVALID_EVENT, "DS",
                                     "Event 0x%" PRIx64 " cannot be waited on if it has never been set.",
                                     reinterpret_cast<const uint64_t &>(event));
            } else {
                stageMask |= global_event_data->stageMask;
            }
        }
    }
    // TODO: Need to validate that host_bit is only set if set event is called
    // but set event can be called at any time.
    if (sourceStageMask != stageMask && sourceStageMask != (stageMask | VK_PIPELINE_STAGE_HOST_BIT)) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00254, "DS", "Submitting command buffer with call to vkCmdWaitEvents "
                                                           "using srcStageMask 0x%X which must be the bitwise "
                                                           "OR of the stageMask parameters used in calls to "
                                                           "vkCmdSetEvent and VK_PIPELINE_STAGE_HOST_BIT if "
                                                           "used with vkSetEvent but instead is 0x%X. %s",
                             sourceStageMask, stageMask, validation_error_map[VALIDATION_ERROR_00254]);
    }
    return skip_call;
}

// Note that we only check stage bits that HAVE required queue flags; don't-care entries are omitted from the table
static std::unordered_map<VkPipelineStageFlags, VkQueueFlags> supported_pipeline_stages_table = {
    {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_QUEUE_GRAPHICS_BIT},
    {VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_QUEUE_COMPUTE_BIT},
    {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT},
    {VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_QUEUE_GRAPHICS_BIT}};

static const VkPipelineStageFlags stage_flag_bit_array[] = {VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX,
                                                            VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
                                                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
                                                            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                                                            VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
                                                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                                                            VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT};

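// Check each stage bit in stage_mask against the queue-capability table above. For example
// (illustrative): recording a barrier with srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT into a
// command buffer allocated from a transfer-only queue family triggers this error, because that stage
// requires VK_QUEUE_GRAPHICS_BIT.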
bool CheckStageMaskQueueCompatibility(layer_data *dev_data, VkCommandBuffer command_buffer, VkPipelineStageFlags stage_mask,
                                      VkQueueFlags queue_flags, const char *function, const char *src_or_dest,
                                      UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    // Look up each bit in the stagemask and check for overlap between its table bits and queue_flags
    for (const auto &item : stage_flag_bit_array) {
        if (stage_mask & item) {
            if ((supported_pipeline_stages_table[item] & queue_flags) == 0) {
                skip |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            reinterpret_cast<uint64_t &>(command_buffer), __LINE__, error_code, "DL",
                            "%s(): %s flag %s is not compatible with the queue family properties of this "
                            "command buffer. %s",
                            function, src_or_dest, string_VkPipelineStageFlagBits(static_cast<VkPipelineStageFlagBits>(item)),
                            validation_error_map[error_code]);
            }
        }
    }
    return skip;
}

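// Note: ValidateStageMasksAgainstQueueCapabilities below skips the per-stage check whenever a mask
// includes VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, since that stage is valid on every queue family.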
bool ValidateStageMasksAgainstQueueCapabilities(layer_data *dev_data, GLOBAL_CB_NODE *cb_state,
                                                VkPipelineStageFlags source_stage_mask, VkPipelineStageFlags dest_stage_mask,
                                                const char *function, UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip = false;
    uint32_t queue_family_index = dev_data->commandPoolMap[cb_state->createInfo.commandPool].queueFamilyIndex;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(dev_data->physical_device), instance_layer_data_map);
    auto physical_device_state = getPhysicalDeviceState(instance_data, dev_data->physical_device);

    // Any pipeline stage included in srcStageMask or dstStageMask must be supported by the capabilities of the queue family
    // specified by the queueFamilyIndex member of the VkCommandPoolCreateInfo structure that was used to create the VkCommandPool
    // that commandBuffer was allocated from, as specified in the table of supported pipeline stages.

    if (queue_family_index < physical_device_state->queue_family_properties.size()) {
        VkQueueFlags specified_queue_flags = physical_device_state->queue_family_properties[queue_family_index].queueFlags;

        if ((source_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, source_stage_mask, specified_queue_flags,
                                                     function, "srcStageMask", error_code);
        }
        if ((dest_stage_mask & VK_PIPELINE_STAGE_ALL_COMMANDS_BIT) == 0) {
            skip |= CheckStageMaskQueueCompatibility(dev_data, cb_state->commandBuffer, dest_stage_mask, specified_queue_flags,
                                                     function, "dstStageMask", error_code);
        }
    }
    return skip;
}

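// Intercept for vkCmdWaitEvents: records the waited events on the command buffer, queues a deferred
// validateEventStageMask check (run at queue-submit time, see above), validates image layout
// transitions and barriers, and only calls down the dispatch chain if no callback requested a skip.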
VKAPI_ATTR void VKAPI_CALL CmdWaitEvents(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent *pEvents,
                                         VkPipelineStageFlags sourceStageMask, VkPipelineStageFlags dstStageMask,
                                         uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                         uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                         uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, sourceStageMask, dstStageMask, "vkCmdWaitEvents",
                                                           VALIDATION_ERROR_02510);
        auto first_event_index = cb_state->events.size();
        for (uint32_t i = 0; i < eventCount; ++i) {
            auto event_state = getEventNode(dev_data, pEvents[i]);
            if (event_state) {
                addCommandBufferBinding(&event_state->cb_bindings,
                                        {reinterpret_cast<const uint64_t &>(pEvents[i]), VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT},
                                        cb_state);
                event_state->cb_bindings.insert(cb_state);
            }
            cb_state->waitedEvents.insert(pEvents[i]);
            cb_state->events.push_back(pEvents[i]);
        }
        std::function<bool(VkQueue)> event_update =
            std::bind(validateEventStageMask, std::placeholders::_1, cb_state, eventCount, first_event_index, sourceStageMask);
        cb_state->eventUpdates.push_back(event_update);
        if (cb_state->state == CB_RECORDING) {
            skip |= ValidateCmd(dev_data, cb_state, CMD_WAITEVENTS, "vkCmdWaitEvents()");
            UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_WAITEVENTS);
        } else {
            skip |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWaitEvents()");
        }
        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip |= ValidateBarriers("vkCmdWaitEvents", commandBuffer, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
                                 pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdWaitEvents(commandBuffer, eventCount, pEvents, sourceStageMask, dstStageMask,
                                               memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                               imageMemoryBarrierCount, pImageMemoryBarriers);
}

VKAPI_ATTR void VKAPI_CALL CmdPipelineBarrier(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask,
                                              VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags,
                                              uint32_t memoryBarrierCount, const VkMemoryBarrier *pMemoryBarriers,
                                              uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier *pBufferMemoryBarriers,
                                              uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier *pImageMemoryBarriers) {
    bool skip = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *cb_state = getCBNode(dev_data, commandBuffer);
    if (cb_state) {
        skip |= ValidateStageMasksAgainstQueueCapabilities(dev_data, cb_state, srcStageMask, dstStageMask, "vkCmdPipelineBarrier",
                                                           VALIDATION_ERROR_02513);
        skip |= ValidateCmd(dev_data, cb_state, CMD_PIPELINEBARRIER, "vkCmdPipelineBarrier()");
        UpdateCmdBufferLastCmd(dev_data, cb_state, CMD_PIPELINEBARRIER);
        skip |= TransitionImageLayouts(commandBuffer, imageMemoryBarrierCount, pImageMemoryBarriers);
        skip |= ValidateBarriers("vkCmdPipelineBarrier", commandBuffer, memoryBarrierCount, pMemoryBarriers,
                                 bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
    }
    lock.unlock();
    if (!skip)
        dev_data->dispatch_table.CmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount,
                                                    pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers,
                                                    imageMemoryBarrierCount, pImageMemoryBarriers);
}

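// Deferred update run at queue-submit time: marks a query as available (value = true) or reset
// (value = false) in both the command buffer's and the owning queue's queryToStateMap. Always returns
// false, since recording this state never itself constitutes a validation failure.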
bool setQueryState(VkQueue queue, VkCommandBuffer commandBuffer, QueryObject object, bool value) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        pCB->queryToStateMap[object] = value;
    }
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data != dev_data->queueMap.end()) {
        queue_data->second.queryToStateMap[object] = value;
    }
    return false;
}

VKAPI_ATTR void VKAPI_CALL
CmdBeginQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot, VkFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        pCB->activeQueries.insert(query);
        if (!pCB->startedQueries.count(query)) {
            pCB->startedQueries.insert(query);
        }
        skip_call |= ValidateCmd(dev_data, pCB, CMD_BEGINQUERY, "vkCmdBeginQuery()");
        UpdateCmdBufferLastCmd(dev_data, pCB, CMD_BEGINQUERY);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdBeginQuery(commandBuffer, queryPool, slot, flags);
}

VKAPI_ATTR void VKAPI_CALL CmdEndQuery(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        if (!pCB->activeQueries.count(query)) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        VALIDATION_ERROR_01041, "DS", "Ending a query before it was started: queryPool 0x%" PRIx64 ", index %d. %s",
                        (uint64_t)(queryPool), slot, validation_error_map[VALIDATION_ERROR_01041]);
        } else {
            pCB->activeQueries.erase(query);
        }
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDQUERY, "vkCmdEndQuery()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDQUERY);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdEndQuery()");
        }
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdEndQuery(commandBuffer, queryPool, slot);
}

VKAPI_ATTR void VKAPI_CALL
CmdResetQueryPool(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        for (uint32_t i = 0; i < queryCount; i++) {
            QueryObject query = {queryPool, firstQuery + i};
            pCB->waitedEventsBeforeQueryReset[query] = pCB->waitedEvents;
            std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, false);
            pCB->queryUpdates.push_back(queryUpdate);
        }
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_RESETQUERYPOOL, "vkCmdResetQueryPool()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_RESETQUERYPOOL);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdResetQueryPool()");
        }
        skip_call |= insideRenderPass(dev_data, pCB, "vkCmdResetQueryPool()", VALIDATION_ERROR_01025);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, pCB);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdResetQueryPool(commandBuffer, queryPool, firstQuery, queryCount);
}

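// Deferred check run at queue-submit time for vkCmdCopyQueryPoolResults: each copied query must be
// available. The per-queue state map is consulted first; queries unknown to this queue fall back to
// the device-wide map, and a query found in neither is reported as invalid.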
bool validateQuery(VkQueue queue, GLOBAL_CB_NODE *pCB, VkQueryPool queryPool, uint32_t queryCount, uint32_t firstQuery) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(pCB->commandBuffer), layer_data_map);
    auto queue_data = dev_data->queueMap.find(queue);
    if (queue_data == dev_data->queueMap.end())
        return false;
    for (uint32_t i = 0; i < queryCount; i++) {
        QueryObject query = {queryPool, firstQuery + i};
        auto query_data = queue_data->second.queryToStateMap.find(query);
        bool fail = false;
        if (query_data != queue_data->second.queryToStateMap.end()) {
            if (!query_data->second) {
                fail = true;
            }
        } else {
            auto global_query_data = dev_data->queryToStateMap.find(query);
            if (global_query_data != dev_data->queryToStateMap.end()) {
                if (!global_query_data->second) {
                    fail = true;
                }
            } else {
                fail = true;
            }
        }
        if (fail) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_QUERY, "DS",
                                 "Requesting a copy from query to buffer with invalid query: queryPool 0x%" PRIx64 ", index %d",
                                 reinterpret_cast<uint64_t &>(queryPool), firstQuery + i);
        }
    }
    return skip_call;
}

VKAPI_ATTR void VKAPI_CALL
CmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount,
                        VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);

    auto cb_node = getCBNode(dev_data, commandBuffer);
    auto dst_buff_state = getBufferState(dev_data, dstBuffer);
    if (cb_node && dst_buff_state) {
        skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, dst_buff_state, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_02526);
        // Update bindings between buffer and cmd buffer
        AddCommandBufferBindingBuffer(dev_data, cb_node, dst_buff_state);
        // Validate that DST buffer has correct usage flags set
        skip_call |=
            ValidateBufferUsageFlags(dev_data, dst_buff_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, VALIDATION_ERROR_01066,
                                     "vkCmdCopyQueryPoolResults()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
        std::function<bool()> function = [=]() {
            SetBufferMemoryValid(dev_data, dst_buff_state, true);
            return false;
        };
        cb_node->validate_functions.push_back(function);
        std::function<bool(VkQueue)> queryUpdate =
            std::bind(validateQuery, std::placeholders::_1, cb_node, queryPool, queryCount, firstQuery);
        cb_node->queryUpdates.push_back(queryUpdate);
        if (cb_node->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS, "vkCmdCopyQueryPoolResults()");
            UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_COPYQUERYPOOLRESULTS);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdCopyQueryPoolResults()");
        }
        skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdCopyQueryPoolResults()", VALIDATION_ERROR_01074);
        addCommandBufferBinding(&getQueryPoolNode(dev_data, queryPool)->cb_bindings,
                                {reinterpret_cast<uint64_t &>(queryPool), VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT}, cb_node);
    } else {
        assert(0);
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdCopyQueryPoolResults(commandBuffer, queryPool, firstQuery, queryCount, dstBuffer, dstOffset,
                                                         stride, flags);
}

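// Intercept for vkCmdPushConstants: the update must be contained in the pipeline layout's push-constant
// ranges. Ranges with stageFlags exactly matching the update are first coalesced into spans; for
// example (illustrative), layout ranges [0, 16) and [8, 32) with identical stageFlags merge into the
// single span [0, 32), so an update with offset 4 and size 24 is accepted.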
VKAPI_ATTR void VKAPI_CALL CmdPushConstants(VkCommandBuffer commandBuffer, VkPipelineLayout layout,
                                            VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size,
                                            const void *pValues) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_PUSHCONSTANTS, "vkCmdPushConstants()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_PUSHCONSTANTS);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdPushConstants()");
        }
    }
    skip_call |= validatePushConstantRange(dev_data, offset, size, "vkCmdPushConstants()");
    if (0 == stageFlags) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00996, "DS", "vkCmdPushConstants() call has no stageFlags set. %s",
                             validation_error_map[VALIDATION_ERROR_00996]);
    }

    // Check if push constant update is within any of the ranges with the same stage flags specified in pipeline layout.
    auto pipeline_layout = getPipelineLayout(dev_data, layout);
    // Coalesce adjacent/overlapping pipeline ranges before checking to see if incoming range is
    // contained in the pipeline ranges.
    // Build a {start, end} span list for ranges with matching stage flags.
    const auto &ranges = pipeline_layout->push_constant_ranges;
    struct span {
        uint32_t start;
        uint32_t end;
    };
    std::vector<span> spans;
    spans.reserve(ranges.size());
    for (const auto &iter : ranges) {
        if (iter.stageFlags == stageFlags) {
            spans.push_back({iter.offset, iter.offset + iter.size});
        }
    }
    if (spans.size() == 0) {
        // There were no ranges that matched the stageFlags.
        skip_call |=
            log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                    VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() stageFlags = 0x%" PRIx32 " do not match "
                                                  "the stageFlags in any of the ranges in pipeline layout 0x%" PRIx64 ". %s",
                    (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
    } else {
        // Sort span list by start value.
        struct comparer {
            bool operator()(struct span i, struct span j) { return i.start < j.start; }
        } my_comparer;
        std::sort(spans.begin(), spans.end(), my_comparer);

        // Examine two spans at a time.
        std::vector<span>::iterator current = spans.begin();
        std::vector<span>::iterator next = current + 1;
        while (next != spans.end()) {
            if (current->end < next->start) {
                // There is a gap; cannot coalesce. Move to the next two spans.
                ++current;
                ++next;
            } else {
                // Coalesce the two spans. The start of the next span
                // is within the current span, so pick the larger of
                // the end values to extend the current span.
                // Then delete the next span and set next to the span after it.
                current->end = max(current->end, next->end);
                next = spans.erase(next);
            }
        }

        // Now we can check if the incoming range is within any of the spans.
        bool contained_in_a_range = false;
        for (uint32_t i = 0; i < spans.size(); ++i) {
            if ((offset >= spans[i].start) && ((uint64_t)offset + (uint64_t)size <= (uint64_t)spans[i].end)) {
                contained_in_a_range = true;
                break;
            }
        }
        if (!contained_in_a_range) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                VALIDATION_ERROR_00988, "DS", "vkCmdPushConstants() Push constant range [%d, %d) "
                                              "with stageFlags = 0x%" PRIx32 " "
                                              "not within flag-matching ranges in pipeline layout 0x%" PRIx64 ". %s",
                offset, offset + size, (uint32_t)stageFlags, (uint64_t)layout, validation_error_map[VALIDATION_ERROR_00988]);
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdPushConstants(commandBuffer, layout, stageFlags, offset, size, pValues);
}

VKAPI_ATTR void VKAPI_CALL
CmdWriteTimestamp(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t slot) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
    if (pCB) {
        QueryObject query = {queryPool, slot};
        std::function<bool(VkQueue)> queryUpdate = std::bind(setQueryState, std::placeholders::_1, commandBuffer, query, true);
        pCB->queryUpdates.push_back(queryUpdate);
        if (pCB->state == CB_RECORDING) {
            skip_call |= ValidateCmd(dev_data, pCB, CMD_WRITETIMESTAMP, "vkCmdWriteTimestamp()");
            UpdateCmdBufferLastCmd(dev_data, pCB, CMD_WRITETIMESTAMP);
        } else {
            skip_call |= report_error_no_cb_begin(dev_data, commandBuffer, "vkCmdWriteTimestamp()");
        }
    }
    lock.unlock();
    if (!skip_call)
        dev_data->dispatch_table.CmdWriteTimestamp(commandBuffer, pipelineStage, queryPool, slot);
}

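// Verify that each framebuffer attachment referenced by a subpass comes from an image created with the
// usage bit its role requires, e.g. (illustrative) a color attachment must come from an image created
// with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, and an input attachment with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT.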
static bool MatchUsage(layer_data *dev_data, uint32_t count, const VkAttachmentReference *attachments,
                       const VkFramebufferCreateInfo *fbci, VkImageUsageFlagBits usage_flag,
                       UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;

    for (uint32_t attach = 0; attach < count; attach++) {
        if (attachments[attach].attachment != VK_ATTACHMENT_UNUSED) {
            // Attachment counts are verified elsewhere, but prevent an invalid access
            if (attachments[attach].attachment < fbci->attachmentCount) {
                const VkImageView *image_view = &fbci->pAttachments[attachments[attach].attachment];
                auto view_state = getImageViewState(dev_data, *image_view);
                if (view_state) {
                    const VkImageCreateInfo *ici = &getImageState(dev_data, view_state->create_info.image)->createInfo;
                    if (ici != nullptr) {
                        if ((ici->usage & usage_flag) == 0) {
                            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                                 (VkDebugReportObjectTypeEXT)0, 0, __LINE__, error_code, "DS",
                                                 "vkCreateFramebuffer: Framebuffer Attachment (%d) conflicts with the image's "
                                                 "IMAGE_USAGE flags (%s). %s",
                                                 attachments[attach].attachment, string_VkImageUsageFlagBits(usage_flag),
                                                 validation_error_map[error_code]);
                        }
                    }
                }
            }
        }
    }
    return skip_call;
}

// Validate VkFramebufferCreateInfo which includes:
// 1. attachmentCount equals renderPass attachmentCount
// 2. corresponding framebuffer and renderpass attachments have matching formats
// 3. corresponding framebuffer and renderpass attachments have matching sample counts
// 4. fb attachments only have a single mip level
// 5. fb attachment dimensions are each at least as large as the fb
// 6. fb attachments use identity swizzle
// 7. fb attachments used by renderPass for color/input/ds have correct usage bit set
// 8. fb dimensions are within physical device limits
static bool ValidateFramebufferCreateInfo(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
    bool skip_call = false;

    auto rp_state = getRenderPassState(dev_data, pCreateInfo->renderPass);
    if (rp_state) {
        const VkRenderPassCreateInfo *rpci = rp_state->createInfo.ptr();
        if (rpci->attachmentCount != pCreateInfo->attachmentCount) {
            skip_call |= log_msg(
                dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00404, "DS",
                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachmentCount of %u does not match attachmentCount of %u of "
                "renderPass (0x%" PRIxLEAST64 ") being used to create Framebuffer. %s",
                pCreateInfo->attachmentCount, rpci->attachmentCount, reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass),
                validation_error_map[VALIDATION_ERROR_00404]);
        } else {
            // attachmentCounts match, so make sure corresponding attachment details line up
            const VkImageView *image_views = pCreateInfo->pAttachments;
            for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
                auto view_state = getImageViewState(dev_data, image_views[i]);
                if (!view_state) {
                    // Guard against a missing state entry (e.g. an invalid view handle); handle validity is reported elsewhere
                    continue;
                }
                auto &ivci = view_state->create_info;
                if (ivci.format != rpci->pAttachments[i].format) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00408, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has format of %s that does not match "
                        "the format of %s used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkFormat(ivci.format), string_VkFormat(rpci->pAttachments[i].format),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00408]);
                }
                const VkImageCreateInfo *ici = &getImageState(dev_data, ivci.image)->createInfo;
                if (ici->samples != rpci->pAttachments[i].samples) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), __LINE__, VALIDATION_ERROR_00409, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has %s samples that do not match "
                        "the %s samples used by the corresponding attachment for renderPass (0x%" PRIxLEAST64 "). %s",
                        i, string_VkSampleCountFlagBits(ici->samples), string_VkSampleCountFlagBits(rpci->pAttachments[i].samples),
                        reinterpret_cast<const uint64_t &>(pCreateInfo->renderPass), validation_error_map[VALIDATION_ERROR_00409]);
                }
                // Verify that view only has a single mip level
                if (ivci.subresourceRange.levelCount != 1) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                VALIDATION_ERROR_00411, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has mip levelCount of %u "
                                "but only a single mip level (levelCount == 1) is allowed when creating a Framebuffer. %s",
                                i, ivci.subresourceRange.levelCount, validation_error_map[VALIDATION_ERROR_00411]);
                }
                const uint32_t mip_level = ivci.subresourceRange.baseMipLevel;
                uint32_t mip_width = max(1u, ici->extent.width >> mip_level);
                uint32_t mip_height = max(1u, ici->extent.height >> mip_level);
                if ((ivci.subresourceRange.layerCount < pCreateInfo->layers) || (mip_width < pCreateInfo->width) ||
                    (mip_height < pCreateInfo->height)) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                DRAWSTATE_INVALID_FRAMEBUFFER_CREATE_INFO, "DS",
                                "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u mip level %u has dimensions "
                                "smaller than the corresponding framebuffer dimensions. Attachment dimensions must be at least "
                                "as large. Here are the respective dimensions for attachment #%u, framebuffer:\n"
                                "width: %u, %u\n"
                                "height: %u, %u\n"
                                "layerCount: %u, %u\n",
                                i, ivci.subresourceRange.baseMipLevel, i, mip_width, pCreateInfo->width, mip_height,
                                pCreateInfo->height, ivci.subresourceRange.layerCount, pCreateInfo->layers);
                }
                if (((ivci.components.r != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.r != VK_COMPONENT_SWIZZLE_R)) ||
                    ((ivci.components.g != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.g != VK_COMPONENT_SWIZZLE_G)) ||
                    ((ivci.components.b != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.b != VK_COMPONENT_SWIZZLE_B)) ||
                    ((ivci.components.a != VK_COMPONENT_SWIZZLE_IDENTITY) && (ivci.components.a != VK_COMPONENT_SWIZZLE_A))) {
                    skip_call |= log_msg(
                        dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                        VALIDATION_ERROR_00412, "DS",
                        "vkCreateFramebuffer(): VkFramebufferCreateInfo attachment #%u has a non-identity swizzle. All "
                        "framebuffer attachments must have been created with the identity swizzle. Here are the actual "
                        "swizzle values:\n"
                        "r swizzle = %s\n"
                        "g swizzle = %s\n"
                        "b swizzle = %s\n"
                        "a swizzle = %s\n"
                        "%s",
                        i, string_VkComponentSwizzle(ivci.components.r), string_VkComponentSwizzle(ivci.components.g),
                        string_VkComponentSwizzle(ivci.components.b), string_VkComponentSwizzle(ivci.components.a),
                        validation_error_map[VALIDATION_ERROR_00412]);
                }
            }
        }
        // Verify correct attachment usage flags
        for (uint32_t subpass = 0; subpass < rpci->subpassCount; subpass++) {
            // Verify input attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].inputAttachmentCount, rpci->pSubpasses[subpass].pInputAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT, VALIDATION_ERROR_00407);
            // Verify color attachments:
            skip_call |=
                MatchUsage(dev_data, rpci->pSubpasses[subpass].colorAttachmentCount, rpci->pSubpasses[subpass].pColorAttachments,
                           pCreateInfo, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VALIDATION_ERROR_00405);
            // Verify depth/stencil attachments:
            if (rpci->pSubpasses[subpass].pDepthStencilAttachment != nullptr) {
                skip_call |= MatchUsage(dev_data, 1, rpci->pSubpasses[subpass].pDepthStencilAttachment, pCreateInfo,
                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VALIDATION_ERROR_00406);
            }
        }
    }
    // Verify FB dimensions are within physical device limits
    if (pCreateInfo->width > dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00413, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo width exceeds physical device limits. "
                             "Requested width: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->width, dev_data->phys_dev_properties.properties.limits.maxFramebufferWidth,
                             validation_error_map[VALIDATION_ERROR_00413]);
    }
    if (pCreateInfo->height > dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00414, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo height exceeds physical device limits. "
                             "Requested height: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->height, dev_data->phys_dev_properties.properties.limits.maxFramebufferHeight,
                             validation_error_map[VALIDATION_ERROR_00414]);
    }
    if (pCreateInfo->layers > dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                             VALIDATION_ERROR_00415, "DS",
                             "vkCreateFramebuffer(): Requested VkFramebufferCreateInfo layers exceeds physical device limits. "
                             "Requested layers: %u, device max: %u\n"
                             "%s",
                             pCreateInfo->layers, dev_data->phys_dev_properties.properties.limits.maxFramebufferLayers,
                             validation_error_map[VALIDATION_ERROR_00415]);
    }
    return skip_call;
}

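// vkCreateFramebuffer below follows the layer's pre/post split: PreCallValidateCreateFramebuffer runs
// under the global lock and may skip the driver call entirely, while PostCallRecordCreateFramebuffer
// shadows the create info and attachment state only after the driver reports VK_SUCCESS.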
Tobin Ehlis7fad84d2016-06-22 09:06:48 -060010094// Validate VkFramebufferCreateInfo state prior to calling down chain to create Framebuffer object
10095// Return true if an error is encountered and callback returns true to skip call down chain
10096// false indicates that call down chain should proceed
10097static bool PreCallValidateCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo) {
10098 // TODO : Verify that renderPass FB is created with is compatible with FB
10099 bool skip_call = false;
Tobin Ehlisd0945232016-06-22 10:02:02 -060010100 skip_call |= ValidateFramebufferCreateInfo(dev_data, pCreateInfo);
Tobin Ehlis7fad84d2016-06-22 09:06:48 -060010101 return skip_call;
10102}

// CreateFramebuffer state has been validated and the call down the chain has completed, so record the new framebuffer object
static void PostCallRecordCreateFramebuffer(layer_data *dev_data, const VkFramebufferCreateInfo *pCreateInfo, VkFramebuffer fb) {
    // Shadow create info and store in map
    std::unique_ptr<FRAMEBUFFER_STATE> fb_state(
        new FRAMEBUFFER_STATE(fb, pCreateInfo, dev_data->renderPassMap[pCreateInfo->renderPass]->createInfo.ptr()));

    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        VkImageView view = pCreateInfo->pAttachments[i];
        auto view_state = getImageViewState(dev_data, view);
        if (!view_state) {
            continue;
        }
        MT_FB_ATTACHMENT_INFO fb_info;
        fb_info.mem = getImageState(dev_data, view_state->create_info.image)->binding.mem;
        fb_info.view_state = view_state;
        fb_info.image = view_state->create_info.image;
        fb_state->attachments.push_back(fb_info);
    }
    dev_data->frameBufferMap[fb] = std::move(fb_state);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateFramebuffer(VkDevice device, const VkFramebufferCreateInfo *pCreateInfo,
                                                 const VkAllocationCallbacks *pAllocator, VkFramebuffer *pFramebuffer) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    bool skip_call = PreCallValidateCreateFramebuffer(dev_data, pCreateInfo);
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = dev_data->dispatch_table.CreateFramebuffer(device, pCreateInfo, pAllocator, pFramebuffer);

    if (VK_SUCCESS == result) {
        lock.lock();
        PostCallRecordCreateFramebuffer(dev_data, pCreateInfo, *pFramebuffer);
        lock.unlock();
    }
    return result;
}

static bool FindDependency(const int index, const int dependent, const std::vector<DAGNode> &subpass_to_node,
                           std::unordered_set<uint32_t> &processed_nodes) {
    // If we have already checked this node, we have not found a dependency path, so return false.
    if (processed_nodes.count(index))
        return false;
    processed_nodes.insert(index);
    const DAGNode &node = subpass_to_node[index];
    // Look for a dependency path. If one exists, return true; otherwise recurse on the previous nodes.
    if (std::find(node.prev.begin(), node.prev.end(), dependent) == node.prev.end()) {
        for (auto elem : node.prev) {
            if (FindDependency(elem, dependent, subpass_to_node, processed_nodes))
                return true;
        }
    } else {
        return true;
    }
    return false;
}
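
// Illustrative walk-through of FindDependency (hypothetical render pass): with declared
// dependencies 0 -> 1 and 1 -> 2, subpass_to_node[2].prev == {1} and subpass_to_node[1].prev == {0}, so
//     std::unordered_set<uint32_t> seen;
//     FindDependency(2, 0, subpass_to_node, seen);  // true: 0 is reached through 2's predecessor 1
// This transitive search lets CheckDependencyExists below accept an implicit dependency
// path where no direct VkSubpassDependency was declared.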

static bool CheckDependencyExists(const layer_data *dev_data, const int subpass, const std::vector<uint32_t> &dependent_subpasses,
                                  const std::vector<DAGNode> &subpass_to_node, bool &skip_call) {
    bool result = true;
    // Loop through all subpasses that share the same attachment and make sure a dependency exists
    for (uint32_t k = 0; k < dependent_subpasses.size(); ++k) {
        if (static_cast<uint32_t>(subpass) == dependent_subpasses[k])
            continue;
        const DAGNode &node = subpass_to_node[subpass];
        // Check for a specified dependency between the two nodes. If one exists we are done.
        auto prev_elem = std::find(node.prev.begin(), node.prev.end(), dependent_subpasses[k]);
        auto next_elem = std::find(node.next.begin(), node.next.end(), dependent_subpasses[k]);
        if (prev_elem == node.prev.end() && next_elem == node.next.end()) {
            // If no direct dependency exists, an implicit (transitive) one still might. If not, report an error.
            std::unordered_set<uint32_t> processed_nodes;
            if (!(FindDependency(subpass, dependent_subpasses[k], subpass_to_node, processed_nodes) ||
                  FindDependency(dependent_subpasses[k], subpass, subpass_to_node, processed_nodes))) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, DRAWSTATE_INVALID_RENDERPASS, "DS",
                                     "A dependency between subpasses %d and %d must exist but one is not specified.", subpass,
                                     dependent_subpasses[k]);
                result = false;
            }
        }
    }
    return result;
}

static bool CheckPreserved(const layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, const int index,
                           const uint32_t attachment, const std::vector<DAGNode> &subpass_to_node, int depth, bool &skip_call) {
    const DAGNode &node = subpass_to_node[index];
    // If this node writes to the attachment, return true, as the next nodes need to preserve the attachment.
    const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        if (attachment == subpass.pColorAttachments[j].attachment)
            return true;
    }
    if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
        if (attachment == subpass.pDepthStencilAttachment->attachment)
            return true;
    }
    bool result = false;
    // Loop through previous nodes and see if any of them write to the attachment.
    for (auto elem : node.prev) {
        result |= CheckPreserved(dev_data, pCreateInfo, elem, attachment, subpass_to_node, depth + 1, skip_call);
    }
    // If the attachment was written to by a previous node, then this node needs to preserve it.
    if (result && depth > 0) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[index];
        bool has_preserved = false;
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            if (subpass.pPreserveAttachments[j] == attachment) {
                has_preserved = true;
                break;
            }
        }
        if (!has_preserved) {
            skip_call |=
                log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                        DRAWSTATE_INVALID_RENDERPASS, "DS",
                        "Attachment %d is used by a later subpass and must be preserved in subpass %d.", attachment, index);
        }
    }
    return result;
}
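
// Illustrative example of the preserve rule (hypothetical three-subpass chain 0 -> 1 -> 2):
// subpass 0 writes color attachment 2, subpass 2 reads it as an input attachment, and
// subpass 1 does not touch it. The recursion above finds the write at depth > 0, so subpass 1
// must list the attachment, e.g.:
//     const uint32_t preserve[] = {2};
//     subpasses[1].preserveAttachmentCount = 1;
//     subpasses[1].pPreserveAttachments = preserve;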

template <class T> bool isRangeOverlapping(T offset1, T size1, T offset2, T size2) {
    // Two ranges [offset, offset + size) overlap iff each range starts before the other one ends.
    return ((offset1 < (offset2 + size2)) && (offset2 < (offset1 + size1)));
}

bool isRegionOverlapping(VkImageSubresourceRange range1, VkImageSubresourceRange range2) {
    return (isRangeOverlapping(range1.baseMipLevel, range1.levelCount, range2.baseMipLevel, range2.levelCount) &&
            isRangeOverlapping(range1.baseArrayLayer, range1.layerCount, range2.baseArrayLayer, range2.layerCount));
}
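
// Illustrative use of the overlap helpers (hypothetical ranges): two views of one image
// whose subresource ranges share mip 1 and array layer 1 are considered overlapping:
//     VkImageSubresourceRange r1 = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 2, 0, 2};  // mips 0-1, layers 0-1
//     VkImageSubresourceRange r2 = {VK_IMAGE_ASPECT_COLOR_BIT, 1, 2, 1, 2};  // mips 1-2, layers 1-2
//     isRegionOverlapping(r1, r2);  // true: the mip ranges and the layer ranges both intersect
// ValidateDependencies() below uses this to treat such attachments as aliases of each other.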

static bool ValidateDependencies(const layer_data *dev_data, FRAMEBUFFER_STATE const *framebuffer,
                                 RENDER_PASS_STATE const *renderPass) {
    bool skip_call = false;
    auto const pFramebufferInfo = framebuffer->createInfo.ptr();
    auto const pCreateInfo = renderPass->createInfo.ptr();
    auto const &subpass_to_node = renderPass->subpassToNode;
    std::vector<std::vector<uint32_t>> output_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> input_attachment_to_subpass(pCreateInfo->attachmentCount);
    std::vector<std::vector<uint32_t>> overlapping_attachments(pCreateInfo->attachmentCount);
    // Find overlapping attachments
    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; ++i) {
        for (uint32_t j = i + 1; j < pCreateInfo->attachmentCount; ++j) {
            VkImageView viewi = pFramebufferInfo->pAttachments[i];
            VkImageView viewj = pFramebufferInfo->pAttachments[j];
            if (viewi == viewj) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto view_state_i = getImageViewState(dev_data, viewi);
            auto view_state_j = getImageViewState(dev_data, viewj);
            if (!view_state_i || !view_state_j) {
                continue;
            }
            auto view_ci_i = view_state_i->create_info;
            auto view_ci_j = view_state_j->create_info;
            if (view_ci_i.image == view_ci_j.image &&
                isRegionOverlapping(view_ci_i.subresourceRange, view_ci_j.subresourceRange)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
                continue;
            }
            auto image_data_i = getImageState(dev_data, view_ci_i.image);
            auto image_data_j = getImageState(dev_data, view_ci_j.image);
            if (!image_data_i || !image_data_j) {
                continue;
            }
            if (image_data_i->binding.mem == image_data_j->binding.mem &&
                isRangeOverlapping(image_data_i->binding.offset, image_data_i->binding.size, image_data_j->binding.offset,
                                   image_data_j->binding.size)) {
                overlapping_attachments[i].push_back(j);
                overlapping_attachments[j].push_back(i);
            }
        }
    }
    for (uint32_t i = 0; i < overlapping_attachments.size(); ++i) {
        uint32_t attachment = i;
        for (auto other_attachment : overlapping_attachments[i]) {
            if (!(pCreateInfo->pAttachments[attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     attachment, other_attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
            if (!(pCreateInfo->pAttachments[other_attachment].flags & VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT)) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00324, "DS",
                                     "Attachment %d aliases attachment %d but doesn't "
                                     "set VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT. %s",
                                     other_attachment, attachment, validation_error_map[VALIDATION_ERROR_00324]);
            }
        }
    }
    // For each attachment, find the subpasses that use it.
    unordered_set<uint32_t> attachmentIndices;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        attachmentIndices.clear();
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            input_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                input_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
        }
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }
            attachmentIndices.insert(attachment);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            output_attachment_to_subpass[attachment].push_back(i);
            for (auto overlapping_attachment : overlapping_attachments[attachment]) {
                output_attachment_to_subpass[overlapping_attachment].push_back(i);
            }

            if (attachmentIndices.count(attachment)) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS",
                            "Cannot use same attachment (%u) as both color and depth output in same subpass (%u).", attachment, i);
            }
        }
    }
    // Wherever a dependency is needed, make sure one exists
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        // If the attachment is an input, then all subpasses that write to it must have a dependency relationship
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        // If the attachment is an output, then all subpasses that use the attachment must have a dependency relationship
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment = subpass.pColorAttachments[j].attachment;
            if (attachment == VK_ATTACHMENT_UNUSED)
                continue;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            const uint32_t &attachment = subpass.pDepthStencilAttachment->attachment;
            CheckDependencyExists(dev_data, i, output_attachment_to_subpass[attachment], subpass_to_node, skip_call);
            CheckDependencyExists(dev_data, i, input_attachment_to_subpass[attachment], subpass_to_node, skip_call);
        }
    }
    // Check implicit dependencies: if a subpass reads an attachment, make sure it is preserved in every subpass
    // between the one that wrote it and the one that reads it.
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            CheckPreserved(dev_data, pCreateInfo, i, subpass.pInputAttachments[j].attachment, subpass_to_node, 0, skip_call);
        }
    }
    return skip_call;
}
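
// Illustrative consequence of the aliasing check in ValidateDependencies (hypothetical
// framebuffer): if attachments 0 and 1 are views into the same VkDeviceMemory range, both
// descriptions must opt in to aliasing or VALIDATION_ERROR_00324 fires:
//     attachment_descs[0].flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;
//     attachment_descs[1].flags = VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT;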
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the subpasses of a renderpass. The initial check makes sure that
// READ_ONLY layout attachments don't have CLEAR as their loadOp.
static bool ValidateLayoutVsAttachmentDescription(debug_report_data *report_data, const VkImageLayout first_layout,
                                                  const uint32_t attachment,
                                                  const VkAttachmentDescription &attachment_description) {
    bool skip_call = false;
    // Verify that initial loadOp on READ_ONLY attachments is not CLEAR
    if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
        if ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
            (first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)) {
            skip_call |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                                 VALIDATION_ERROR_02351, "DS", "Cannot clear attachment %d with invalid first layout %s. %s",
                                 attachment, string_VkImageLayout(first_layout), validation_error_map[VALIDATION_ERROR_02351]);
        }
    }
    return skip_call;
}
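
// Illustrative case that trips the check above (hypothetical attachment description):
//     attachment_desc.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
//     // ... first referenced by a subpass with layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
// Clearing requires write access, so a read-only first layout reports VALIDATION_ERROR_02351.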

static bool ValidateLayouts(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip = false;

    // Track when we're observing the first use of an attachment
    std::vector<bool> attach_first_use(pCreateInfo->attachmentCount, true);
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            auto attach_index = subpass.pColorAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pColorAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
                // This is ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints?
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is GENERAL but should be COLOR_ATTACHMENT_OPTIMAL.");
                break;

            default:
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for color attachment is %s but can only be COLOR_ATTACHMENT_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pColorAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pColorAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            switch (subpass.pDepthStencilAttachment->layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal; TODO: reconsider this warning based on other constraints? GENERAL can be better than doing
                // a bunch of transitions.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "GENERAL layout for depth attachment may not give optimal performance.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                                __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for depth attachment is %s but can only be DEPTH_STENCIL_ATTACHMENT_OPTIMAL, "
                                "DEPTH_STENCIL_READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pDepthStencilAttachment->layout));
            }

            auto attach_index = subpass.pDepthStencilAttachment->attachment;
            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pDepthStencilAttachment->layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            auto attach_index = subpass.pInputAttachments[j].attachment;
            if (attach_index == VK_ATTACHMENT_UNUSED)
                continue;

            switch (subpass.pInputAttachments[j].layout) {
            case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
            case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
                // These are ideal.
                break;

            case VK_IMAGE_LAYOUT_GENERAL:
                // May not be optimal. TODO: reconsider this warning based on other constraints.
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT,
                                VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is GENERAL but should be READ_ONLY_OPTIMAL.");
                break;

            default:
                // No other layouts are acceptable
                skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS",
                                "Layout for input attachment is %s but can only be READ_ONLY_OPTIMAL or GENERAL.",
                                string_VkImageLayout(subpass.pInputAttachments[j].layout));
            }

            if (attach_first_use[attach_index]) {
                skip |= ValidateLayoutVsAttachmentDescription(dev_data->report_data, subpass.pInputAttachments[j].layout,
                                                              attach_index, pCreateInfo->pAttachments[attach_index]);
            }
            attach_first_use[attach_index] = false;
        }
    }
    return skip;
}

static bool CreatePassDAG(const layer_data *dev_data, VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                          std::vector<DAGNode> &subpass_to_node, std::vector<bool> &has_self_dependency) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        DAGNode &subpass_node = subpass_to_node[i];
        subpass_node.pass = i;
    }
    for (uint32_t i = 0; i < pCreateInfo->dependencyCount; ++i) {
        const VkSubpassDependency &dependency = pCreateInfo->pDependencies[i];
        if (dependency.srcSubpass == VK_SUBPASS_EXTERNAL || dependency.dstSubpass == VK_SUBPASS_EXTERNAL) {
            if (dependency.srcSubpass == dependency.dstSubpass) {
                skip_call |=
                    log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                            DRAWSTATE_INVALID_RENDERPASS, "DS", "The src and dest subpasses cannot both be external.");
            }
        } else if (dependency.srcSubpass > dependency.dstSubpass) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 DRAWSTATE_INVALID_RENDERPASS, "DS",
                                 "Dependency graph must be specified such that an earlier pass cannot depend on a later pass.");
        } else if (dependency.srcSubpass == dependency.dstSubpass) {
            has_self_dependency[dependency.srcSubpass] = true;
        } else {
            subpass_to_node[dependency.dstSubpass].prev.push_back(dependency.srcSubpass);
            subpass_to_node[dependency.srcSubpass].next.push_back(dependency.dstSubpass);
        }
    }
    return skip_call;
}
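
// Illustrative sketch of the DAG built above (hypothetical dependency from subpass 0 to 1):
//     VkSubpassDependency dep = {};
//     dep.srcSubpass = 0;
//     dep.dstSubpass = 1;
//     // After CreatePassDAG: subpass_to_node[1].prev == {0} and subpass_to_node[0].next == {1}
// A dependency with srcSubpass == dstSubpass == 2 would instead set has_self_dependency[2].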

VKAPI_ATTR VkResult VKAPI_CALL CreateShaderModule(VkDevice device, const VkShaderModuleCreateInfo *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkShaderModule *pShaderModule) {
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    bool skip_call = false;

    // Use SPIRV-Tools validator to try and catch any issues with the module itself
    spv_context ctx = spvContextCreate(SPV_ENV_VULKAN_1_0);
    spv_const_binary_t binary{pCreateInfo->pCode, pCreateInfo->codeSize / sizeof(uint32_t)};
    spv_diagnostic diag = nullptr;

    auto result = spvValidate(ctx, &binary, &diag);
    if (result != SPV_SUCCESS) {
        skip_call |=
            log_msg(dev_data->report_data, result == SPV_WARNING ? VK_DEBUG_REPORT_WARNING_BIT_EXT : VK_DEBUG_REPORT_ERROR_BIT_EXT,
                    VkDebugReportObjectTypeEXT(0), 0, __LINE__, SHADER_CHECKER_INCONSISTENT_SPIRV, "SC",
                    "SPIR-V module not valid: %s", diag && diag->error ? diag->error : "(no error text)");
    }

    spvDiagnosticDestroy(diag);
    spvContextDestroy(ctx);

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult res = dev_data->dispatch_table.CreateShaderModule(device, pCreateInfo, pAllocator, pShaderModule);

    if (res == VK_SUCCESS) {
        std::lock_guard<std::mutex> lock(global_lock);
        dev_data->shaderModuleMap[*pShaderModule] = unique_ptr<shader_module>(new shader_module(pCreateInfo));
    }
    return res;
}

static bool ValidateAttachmentIndex(layer_data *dev_data, uint32_t attachment, uint32_t attachment_count, const char *type) {
    bool skip_call = false;
    if (attachment >= attachment_count && attachment != VK_ATTACHMENT_UNUSED) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             VALIDATION_ERROR_00325, "DS",
                             "CreateRenderPass: %s attachment %d must be less than the total number of attachments %d. %s",
                             type, attachment, attachment_count, validation_error_map[VALIDATION_ERROR_00325]);
    }
    return skip_call;
}

static bool IsPowerOfTwo(unsigned x) {
    return x && !(x & (x - 1));
}
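
// IsPowerOfTwo() relies on x & (x - 1) clearing the lowest set bit, so only single-bit values
// pass. Since each VkSampleCountFlagBits value is a single bit, OR-ing the sample counts of a
// subpass's attachments stays a power of two exactly when they all match (illustrative):
//     IsPowerOfTwo(VK_SAMPLE_COUNT_4_BIT);                          // true
//     IsPowerOfTwo(VK_SAMPLE_COUNT_1_BIT | VK_SAMPLE_COUNT_4_BIT);  // false: mixed sample counts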

static bool ValidateRenderpassAttachmentUsage(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo) {
    bool skip_call = false;
    for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
        const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
        if (subpass.pipelineBindPoint != VK_PIPELINE_BIND_POINT_GRAPHICS) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                 VALIDATION_ERROR_00347, "DS",
                                 "CreateRenderPass: Pipeline bind point for subpass %d must be VK_PIPELINE_BIND_POINT_GRAPHICS. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00347]);
        }
        for (uint32_t j = 0; j < subpass.preserveAttachmentCount; ++j) {
            uint32_t attachment = subpass.pPreserveAttachments[j];
            if (attachment == VK_ATTACHMENT_UNUSED) {
                skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
                                     __LINE__, VALIDATION_ERROR_00356, "DS",
                                     "CreateRenderPass: Preserve attachment (%d) must not be VK_ATTACHMENT_UNUSED. %s", j,
                                     validation_error_map[VALIDATION_ERROR_00356]);
            } else {
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Preserve");
            }
        }

        auto subpass_performs_resolve =
            subpass.pResolveAttachments &&
            std::any_of(subpass.pResolveAttachments, subpass.pResolveAttachments + subpass.colorAttachmentCount,
                        [](VkAttachmentReference ref) { return ref.attachment != VK_ATTACHMENT_UNUSED; });

        unsigned sample_count = 0;

        for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
            uint32_t attachment;
            if (subpass.pResolveAttachments) {
                attachment = subpass.pResolveAttachments[j].attachment;
                skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Resolve");

                if (!skip_call && attachment != VK_ATTACHMENT_UNUSED &&
                    pCreateInfo->pAttachments[attachment].samples != VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00352, "DS",
                                         "CreateRenderPass: Subpass %u requests multisample resolve into attachment %u, "
                                         "which must have VK_SAMPLE_COUNT_1_BIT but has %s. %s",
                                         i, attachment, string_VkSampleCountFlagBits(pCreateInfo->pAttachments[attachment].samples),
                                         validation_error_map[VALIDATION_ERROR_00352]);
                }
            }
            attachment = subpass.pColorAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Color");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;

                if (subpass_performs_resolve && pCreateInfo->pAttachments[attachment].samples == VK_SAMPLE_COUNT_1_BIT) {
                    skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0,
                                         __LINE__, VALIDATION_ERROR_00351, "DS",
                                         "CreateRenderPass: Subpass %u requests multisample resolve from attachment %u "
                                         "which has VK_SAMPLE_COUNT_1_BIT. %s",
                                         i, attachment, validation_error_map[VALIDATION_ERROR_00351]);
                }
            }
        }

        if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
            uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Depth stencil");

            if (!skip_call && attachment != VK_ATTACHMENT_UNUSED) {
                sample_count |= (unsigned)pCreateInfo->pAttachments[attachment].samples;
            }
        }

        for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
            uint32_t attachment = subpass.pInputAttachments[j].attachment;
            skip_call |= ValidateAttachmentIndex(dev_data, attachment, pCreateInfo->attachmentCount, "Input");
        }

        if (sample_count && !IsPowerOfTwo(sample_count)) {
            skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VkDebugReportObjectTypeEXT(0), 0, __LINE__,
                                 VALIDATION_ERROR_00337, "DS",
                                 "CreateRenderPass: Subpass %u attempts to render to "
                                 "attachments with inconsistent sample counts. %s",
                                 i, validation_error_map[VALIDATION_ERROR_00337]);
        }
    }
    return skip_call;
}
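
// Illustrative resolve setup that satisfies the checks above (hypothetical attachment table,
// subpass resolving color attachment 0 into attachment 1):
//     attachment_descs[0].samples = VK_SAMPLE_COUNT_4_BIT;  // source must be multisampled, else 00351
//     attachment_descs[1].samples = VK_SAMPLE_COUNT_1_BIT;  // resolve target must be single-sample, else 00352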

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    bool skip_call = false;
    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);

    // TODO: As part of wrapping up the mem_tracker/core_validation merge the following routine should be consolidated with
    // ValidateLayouts.
    skip_call |= ValidateRenderpassAttachmentUsage(dev_data, pCreateInfo);
    if (!skip_call) {
        skip_call |= ValidateLayouts(dev_data, device, pCreateInfo);
    }
    lock.unlock();

    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);

    if (VK_SUCCESS == result) {
        lock.lock();

        std::vector<bool> has_self_dependency(pCreateInfo->subpassCount);
        std::vector<DAGNode> subpass_to_node(pCreateInfo->subpassCount);
        skip_call |= CreatePassDAG(dev_data, device, pCreateInfo, subpass_to_node, has_self_dependency);

        auto render_pass = unique_ptr<RENDER_PASS_STATE>(new RENDER_PASS_STATE(pCreateInfo));
        render_pass->renderPass = *pRenderPass;
        render_pass->hasSelfDependency = has_self_dependency;
        render_pass->subpassToNode = subpass_to_node;

        // TODO: Maybe fill list and then copy instead of locking
        std::unordered_map<uint32_t, bool> &attachment_first_read = render_pass->attachment_first_read;
        std::unordered_map<uint32_t, VkImageLayout> &attachment_first_layout = render_pass->attachment_first_layout;
        for (uint32_t i = 0; i < pCreateInfo->subpassCount; ++i) {
            const VkSubpassDescription &subpass = pCreateInfo->pSubpasses[i];
            for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
                uint32_t attachment = subpass.pColorAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pColorAttachments[j].layout));
                }
            }
            if (subpass.pDepthStencilAttachment && subpass.pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) {
                uint32_t attachment = subpass.pDepthStencilAttachment->attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, false));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pDepthStencilAttachment->layout));
                }
            }
            for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
                uint32_t attachment = subpass.pInputAttachments[j].attachment;
                if (!attachment_first_read.count(attachment)) {
                    attachment_first_read.insert(std::make_pair(attachment, true));
                    attachment_first_layout.insert(std::make_pair(attachment, subpass.pInputAttachments[j].layout));
                }
            }
        }

        dev_data->renderPassMap[*pRenderPass] = std::move(render_pass);
    }
    return result;
}

static bool VerifyFramebufferAndRenderPassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                                  const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    auto const pRenderPassInfo = getRenderPassState(dev_data, pRenderPassBegin->renderPass)->createInfo.ptr();
    auto const &framebufferInfo = dev_data->frameBufferMap[pRenderPassBegin->framebuffer]->createInfo;
    if (pRenderPassInfo->attachmentCount != framebufferInfo.attachmentCount) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             DRAWSTATE_INVALID_RENDERPASS, "DS",
                             "You cannot start a render pass using a framebuffer "
                             "with a different number of attachments than the render pass.");
    }
    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        const VkImageView &image_view = framebufferInfo.pAttachments[i];
        auto view_state = getImageViewState(dev_data, image_view);
        assert(view_state);
        const VkImage &image = view_state->create_info.image;
        const VkImageSubresourceRange &subRange = view_state->create_info.subresourceRange;
        IMAGE_CMD_BUF_LAYOUT_NODE newNode = {pRenderPassInfo->pAttachments[i].initialLayout,
                                             pRenderPassInfo->pAttachments[i].initialLayout};
        // TODO: Do not iterate over every possibility - consolidate where possible
        for (uint32_t j = 0; j < subRange.levelCount; j++) {
            uint32_t level = subRange.baseMipLevel + j;
            for (uint32_t k = 0; k < subRange.layerCount; k++) {
                uint32_t layer = subRange.baseArrayLayer + k;
                VkImageSubresource sub = {subRange.aspectMask, level, layer};
                IMAGE_CMD_BUF_LAYOUT_NODE node;
                if (!FindLayout(pCB, image, sub, node)) {
                    SetLayout(pCB, image, sub, newNode);
                    continue;
                }
                if (newNode.layout != VK_IMAGE_LAYOUT_UNDEFINED && newNode.layout != node.layout) {
                    skip_call |=
                        log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                                DRAWSTATE_INVALID_RENDERPASS, "DS",
                                "You cannot start a render pass using attachment %u "
                                "where the render pass initial layout is %s and the previous "
                                "known layout of the attachment is %s. The layouts must match, or "
                                "the render pass initial layout for the attachment must be "
                                "VK_IMAGE_LAYOUT_UNDEFINED.",
                                i, string_VkImageLayout(newNode.layout), string_VkImageLayout(node.layout));
                }
            }
        }
    }
    return skip_call;
}

static void TransitionAttachmentRefLayout(layer_data *dev_data, GLOBAL_CB_NODE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
                                          VkAttachmentReference ref) {
    if (ref.attachment != VK_ATTACHMENT_UNUSED) {
        auto image_view = pFramebuffer->createInfo.pAttachments[ref.attachment];
        SetLayout(dev_data, pCB, image_view, ref.layout);
    }
}

static void TransitionSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
                                     const int subpass_index) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    auto const &subpass = renderPass->createInfo.pSubpasses[subpass_index];
    for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pInputAttachments[j]);
    }
    for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, subpass.pColorAttachments[j]);
    }
    if (subpass.pDepthStencilAttachment) {
        TransitionAttachmentRefLayout(dev_data, pCB, framebuffer, *subpass.pDepthStencilAttachment);
    }
}

static bool validatePrimaryCommandBuffer(const layer_data *dev_data, const GLOBAL_CB_NODE *pCB, const std::string &cmd_name,
                                         UNIQUE_VALIDATION_ERROR_CODE error_code) {
    bool skip_call = false;
    if (pCB->createInfo.level != VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
        skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
                             error_code, "DS", "Cannot execute command %s on a secondary command buffer. %s", cmd_name.c_str(),
                             validation_error_map[error_code]);
    }
    return skip_call;
}

static void TransitionFinalSubpassLayouts(layer_data *dev_data, GLOBAL_CB_NODE *pCB,
                                          const VkRenderPassBeginInfo *pRenderPassBegin) {
    auto renderPass = getRenderPassState(dev_data, pRenderPassBegin->renderPass);
    if (!renderPass)
        return;

    const VkRenderPassCreateInfo *pRenderPassInfo = renderPass->createInfo.ptr();
    auto framebuffer = getFramebufferState(dev_data, pRenderPassBegin->framebuffer);
    if (!framebuffer)
        return;

    for (uint32_t i = 0; i < pRenderPassInfo->attachmentCount; ++i) {
        auto image_view = framebuffer->createInfo.pAttachments[i];
        SetLayout(dev_data, pCB, image_view, pRenderPassInfo->pAttachments[i].finalLayout);
    }
}

static bool VerifyRenderAreaBounds(const layer_data *dev_data, const VkRenderPassBeginInfo *pRenderPassBegin) {
    bool skip_call = false;
    const safe_VkFramebufferCreateInfo *pFramebufferInfo =
        &getFramebufferState(dev_data, pRenderPassBegin->framebuffer)->createInfo;
    if (pRenderPassBegin->renderArea.offset.x < 0 ||
        (pRenderPassBegin->renderArea.offset.x + pRenderPassBegin->renderArea.extent.width) > pFramebufferInfo->width ||
        pRenderPassBegin->renderArea.offset.y < 0 ||
        (pRenderPassBegin->renderArea.offset.y + pRenderPassBegin->renderArea.extent.height) > pFramebufferInfo->height) {
        skip_call |= static_cast<bool>(log_msg(
            dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
            DRAWSTATE_INVALID_RENDER_AREA, "CORE",
            "Cannot execute a render pass with renderArea not within the bounds of the "
            "framebuffer. RenderArea: x %d, y %d, width %d, height %d. Framebuffer: width %d, "
            "height %d.",
            pRenderPassBegin->renderArea.offset.x, pRenderPassBegin->renderArea.offset.y, pRenderPassBegin->renderArea.extent.width,
            pRenderPassBegin->renderArea.extent.height, pFramebufferInfo->width, pFramebufferInfo->height));
    }
    return skip_call;
}
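
// Illustrative renderArea that fails the bounds check above (hypothetical 1024x768 framebuffer):
//     rp_begin.renderArea.offset = {512, 0};
//     rp_begin.renderArea.extent = {1024, 768};  // 512 + 1024 > 1024, so DRAWSTATE_INVALID_RENDER_AREA fires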

// If the format has a stencil aspect, the stencil[Load|Store]Op must be checked; if it has a depth or color aspect, the
// [load|store]Op must be checked. For combined depth/stencil formats both ops are consulted.
// TODO: The memory valid flag in DEVICE_MEM_INFO should probably be split to track the validity of stencil memory separately.
template <typename T> static bool FormatSpecificLoadAndStoreOpSettings(VkFormat format, T color_depth_op, T stencil_op, T op) {
    if (color_depth_op != op && stencil_op != op) {
        return false;
    }
    bool check_color_depth_load_op = !vk_format_is_stencil_only(format);
    bool check_stencil_load_op = vk_format_is_depth_and_stencil(format) || !check_color_depth_load_op;

    return (((check_color_depth_load_op == true) && (color_depth_op == op)) ||
            ((check_stencil_load_op == true) && (stencil_op == op)));
}
10864
Chia-I Wu629d7cd2016-05-06 11:32:54 +080010865VKAPI_ATTR void VKAPI_CALL
10866CmdBeginRenderPass(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo *pRenderPassBegin, VkSubpassContents contents) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060010867 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010868 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010869 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010870 GLOBAL_CB_NODE *cb_node = getCBNode(dev_data, commandBuffer);
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060010871 auto renderPass = pRenderPassBegin ? getRenderPassState(dev_data, pRenderPassBegin->renderPass) : nullptr;
Tobin Ehlis04c04272016-10-12 11:54:09 -060010872 auto framebuffer = pRenderPassBegin ? getFramebufferState(dev_data, pRenderPassBegin->framebuffer) : nullptr;
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010873 if (cb_node) {
Chris Forbes967c4682016-05-17 11:36:23 +120010874 if (renderPass) {
Tobin Ehlis7f0416c2016-07-15 16:01:13 -060010875 uint32_t clear_op_size = 0; // Make sure pClearValues is at least as large as last LOAD_OP_CLEAR
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010876 cb_node->activeFramebuffer = pRenderPassBegin->framebuffer;
Mark Lobodzinski4dc768c2016-10-03 16:01:12 -060010877 for (uint32_t i = 0; i < renderPass->createInfo.attachmentCount; ++i) {
Chris Forbes05e03b72016-05-17 15:27:58 +120010878 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
Chris Forbesef730462016-09-27 12:03:31 +130010879 auto pAttachment = &renderPass->createInfo.pAttachments[i];
Chris Forbescc836ab2016-09-26 17:04:41 +130010880 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10881 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010882 VK_ATTACHMENT_LOAD_OP_CLEAR)) {
Mark Lobodzinskia8bbfde2016-07-20 10:02:23 -060010883 clear_op_size = static_cast<uint32_t>(i) + 1;
Chris Forbes967c4682016-05-17 11:36:23 +120010884 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010885 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
Chris Forbes967c4682016-05-17 11:36:23 +120010886 return false;
10887 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010888 cb_node->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130010889 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10890 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010891 VK_ATTACHMENT_LOAD_OP_DONT_CARE)) {
Chris Forbes967c4682016-05-17 11:36:23 +120010892 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010893 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
Chris Forbes967c4682016-05-17 11:36:23 +120010894 return false;
10895 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010896 cb_node->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130010897 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->loadOp,
10898 pAttachment->stencilLoadOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060010899 VK_ATTACHMENT_LOAD_OP_LOAD)) {
Chris Forbes967c4682016-05-17 11:36:23 +120010900 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010901 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
Tobin Ehlis5d461152016-08-10 19:11:54 -060010902 "vkCmdBeginRenderPass()");
Chris Forbes967c4682016-05-17 11:36:23 +120010903 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010904 cb_node->validate_functions.push_back(function);
Chris Forbes967c4682016-05-17 11:36:23 +120010905 }
Chris Forbescc836ab2016-09-26 17:04:41 +130010906 if (renderPass->attachment_first_read[i]) {
Chris Forbes967c4682016-05-17 11:36:23 +120010907 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060010908 return ValidateImageMemoryIsValid(dev_data, getImageState(dev_data, fb_info.image),
Tobin Ehlis5d461152016-08-10 19:11:54 -060010909 "vkCmdBeginRenderPass()");
Chris Forbes967c4682016-05-17 11:36:23 +120010910 };
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010911 cb_node->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010912 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010913 }
Tobin Ehlis7f0416c2016-07-15 16:01:13 -060010914 if (clear_op_size > pRenderPassBegin->clearValueCount) {
Slawomir Cygan0808f392016-11-28 17:53:23 +010010915 skip_call |= log_msg(
10916 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10917 reinterpret_cast<uint64_t &>(renderPass), __LINE__, VALIDATION_ERROR_00442,
10918 "DS", "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but there must "
10919 "be at least %u entries in pClearValues array to account for the highest index attachment in renderPass "
10920 "0x%" PRIx64 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u. Note that the pClearValues array "
10921 "is indexed by attachment number so even if some pClearValues entries between 0 and %u correspond to "
10922 "attachments that aren't cleared they will be ignored. %s",
10923 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size,
10924 clear_op_size - 1, validation_error_map[VALIDATION_ERROR_00442]);
10925 }
10926 if (clear_op_size < pRenderPassBegin->clearValueCount) {
10927 skip_call |= log_msg(
10928 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT,
10929 reinterpret_cast<uint64_t &>(renderPass), __LINE__, DRAWSTATE_RENDERPASS_TOO_MANY_CLEAR_VALUES, "DS",
10930 "In vkCmdBeginRenderPass() the VkRenderPassBeginInfo struct has a clearValueCount of %u but only first %u "
10931 "entries in pClearValues array are used. The highest index attachment in renderPass 0x%" PRIx64
10932 " that uses VK_ATTACHMENT_LOAD_OP_CLEAR is %u - other pClearValues are ignored.",
10933 pRenderPassBegin->clearValueCount, clear_op_size, reinterpret_cast<uint64_t &>(renderPass), clear_op_size);
Tobin Ehlis74aa54a2016-05-31 13:06:24 -060010934 }
Tobin Ehlisfe871282016-06-28 10:28:02 -060010935 skip_call |= VerifyRenderAreaBounds(dev_data, pRenderPassBegin);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010936 skip_call |= VerifyFramebufferAndRenderPassLayouts(dev_data, cb_node, pRenderPassBegin);
Mike Weiblen6daea5b2016-12-19 20:41:58 -070010937 skip_call |= insideRenderPass(dev_data, cb_node, "vkCmdBeginRenderPass()", VALIDATION_ERROR_00440);
Tobin Ehlisfe871282016-06-28 10:28:02 -060010938 skip_call |= ValidateDependencies(dev_data, framebuffer, renderPass);
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010939 skip_call |= validatePrimaryCommandBuffer(dev_data, cb_node, "vkCmdBeginRenderPass", VALIDATION_ERROR_00441);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070010940 skip_call |= ValidateCmd(dev_data, cb_node, CMD_BEGINRENDERPASS, "vkCmdBeginRenderPass()");
10941 UpdateCmdBufferLastCmd(dev_data, cb_node, CMD_BEGINRENDERPASS);
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010942 cb_node->activeRenderPass = renderPass;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010943 // This is a shallow copy as that is all that is needed for now
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010944 cb_node->activeRenderPassBeginInfo = *pRenderPassBegin;
10945 cb_node->activeSubpass = 0;
10946 cb_node->activeSubpassContents = contents;
10947 cb_node->framebuffers.insert(pRenderPassBegin->framebuffer);
Tobin Ehlis9fc34aa2016-09-14 15:26:09 -060010948 // Connect this framebuffer and its children to this cmdBuffer
10949 AddFramebufferBinding(dev_data, cb_node, framebuffer);
Chris Forbesfb2aae32016-06-30 15:42:41 +120010950 // transition attachments to the correct layouts for the first subpass
Tobin Ehlisc7d4ff12016-09-06 20:40:29 -060010951 TransitionSubpassLayouts(dev_data, cb_node, &cb_node->activeRenderPassBeginInfo, cb_node->activeSubpass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010952 }
10953 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010954 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060010955 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130010956 dev_data->dispatch_table.CmdBeginRenderPass(commandBuffer, pRenderPassBegin, contents);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010957 }
10958}
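// A minimal app-side sketch of satisfying the clearValueCount rule checked above, assuming a
// hypothetical render pass whose only VK_ATTACHMENT_LOAD_OP_CLEAR attachment is at index 1
// (render_pass, framebuffer, cmd_buf, width, height are placeholder handles/values):
//
//   VkClearValue clears[2] = {};                  // index 0 exists but is ignored
//   clears[1].color = {{0.0f, 0.0f, 0.0f, 1.0f}};
//   VkRenderPassBeginInfo rp_begin = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO};
//   rp_begin.renderPass = render_pass;
//   rp_begin.framebuffer = framebuffer;
//   rp_begin.renderArea = {{0, 0}, {width, height}};
//   rp_begin.clearValueCount = 2;                 // >= highest CLEAR index + 1
//   rp_begin.pClearValues = clears;
//   vkCmdBeginRenderPass(cmd_buf, &rp_begin, VK_SUBPASS_CONTENTS_INLINE);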
10959
Chia-I Wu629d7cd2016-05-06 11:32:54 +080010960VKAPI_ATTR void VKAPI_CALL CmdNextSubpass(VkCommandBuffer commandBuffer, VkSubpassContents contents) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060010961 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010962 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010963 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010964 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010965 if (pCB) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010966 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdNextSubpass", VALIDATION_ERROR_00459);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070010967 skip_call |= ValidateCmd(dev_data, pCB, CMD_NEXTSUBPASS, "vkCmdNextSubpass()");
10968 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_NEXTSUBPASS);
Mike Weiblen6daea5b2016-12-19 20:41:58 -070010969 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdNextSubpass()", VALIDATION_ERROR_00458);
Chris Forbes0948afa2016-09-07 11:44:08 +120010970
Chris Forbesef730462016-09-27 12:03:31 +130010971 auto subpassCount = pCB->activeRenderPass->createInfo.subpassCount;
Chris Forbes0948afa2016-09-07 11:44:08 +120010972 if (pCB->activeSubpass == subpassCount - 1) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070010973 skip_call |= log_msg(
10974 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
10975 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00453, "DS",
10976 "vkCmdNextSubpass(): Attempted to advance beyond final subpass. %s", validation_error_map[VALIDATION_ERROR_00453]);
Chris Forbes0948afa2016-09-07 11:44:08 +120010977 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010978 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010979 lock.unlock();
Chris Forbes2784bcb2016-09-07 11:43:52 +120010980
10981 if (skip_call)
10982 return;
10983
Chris Forbesaaa9c282016-10-03 20:01:14 +130010984 dev_data->dispatch_table.CmdNextSubpass(commandBuffer, contents);
Chris Forbes2784bcb2016-09-07 11:43:52 +120010985
10986 if (pCB) {
10987 lock.lock();
10988 pCB->activeSubpass++;
10989 pCB->activeSubpassContents = contents;
10990 TransitionSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo, pCB->activeSubpass);
10991 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010992}
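// e.g. a render pass created with subpassCount == 3 permits exactly two vkCmdNextSubpass()
// calls between vkCmdBeginRenderPass() and vkCmdEndRenderPass(); a third call would trip
// the final-subpass check above.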
10993
Chia-I Wu629d7cd2016-05-06 11:32:54 +080010994VKAPI_ATTR void VKAPI_CALL CmdEndRenderPass(VkCommandBuffer commandBuffer) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060010995 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070010996 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060010997 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbesc785a102016-05-17 14:59:22 +120010998 auto pCB = getCBNode(dev_data, commandBuffer);
10999 if (pCB) {
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011000 RENDER_PASS_STATE *rp_state = pCB->activeRenderPass;
Tobin Ehlis04c04272016-10-12 11:54:09 -060011001 auto framebuffer = getFramebufferState(dev_data, pCB->activeFramebuffer);
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011002 if (rp_state) {
11003 if (pCB->activeSubpass != rp_state->createInfo.subpassCount - 1) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011004 skip_call |= log_msg(
11005 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11006 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, VALIDATION_ERROR_00460, "DS",
11007 "vkCmdEndRenderPass(): Called before reaching final subpass. %s", validation_error_map[VALIDATION_ERROR_00460]);
Chris Forbes85bb4002016-09-07 14:08:31 +120011008 }
11009
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011010 for (size_t i = 0; i < rp_state->createInfo.attachmentCount; ++i) {
Chris Forbesb065df02016-05-17 15:45:31 +120011011 MT_FB_ATTACHMENT_INFO &fb_info = framebuffer->attachments[i];
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011012 auto pAttachment = &rp_state->createInfo.pAttachments[i];
Chris Forbescc836ab2016-09-26 17:04:41 +130011013 if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11014 pAttachment->stencilStoreOp, VK_ATTACHMENT_STORE_OP_STORE)) {
Chris Forbesc785a102016-05-17 14:59:22 +120011015 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011016 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), true);
Chris Forbesc785a102016-05-17 14:59:22 +120011017 return false;
11018 };
11019 pCB->validate_functions.push_back(function);
Chris Forbescc836ab2016-09-26 17:04:41 +130011020 } else if (FormatSpecificLoadAndStoreOpSettings(pAttachment->format, pAttachment->storeOp,
11021 pAttachment->stencilStoreOp,
Mark Lobodzinski7b58e692016-06-10 15:28:17 -060011022 VK_ATTACHMENT_STORE_OP_DONT_CARE)) {
Chris Forbesc785a102016-05-17 14:59:22 +120011023 std::function<bool()> function = [=]() {
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011024 SetImageMemoryValid(dev_data, getImageState(dev_data, fb_info.image), false);
Chris Forbesc785a102016-05-17 14:59:22 +120011025 return false;
11026 };
11027 pCB->validate_functions.push_back(function);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011028 }
11029 }
11030 }
Mike Weiblen6daea5b2016-12-19 20:41:58 -070011031 skip_call |= outsideRenderPass(dev_data, pCB, "vkCmdEndRenderPass()", VALIDATION_ERROR_00464);
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011032 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdEndRenderPass", VALIDATION_ERROR_00465);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011033 skip_call |= ValidateCmd(dev_data, pCB, CMD_ENDRENDERPASS, "vkCmdEndRenderPass()");
11034 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_ENDRENDERPASS);
Chris Forbes2886a9e2016-09-07 13:52:28 +120011035 }
11036 lock.unlock();
11037
11038 if (skip_call)
11039 return;
11040
Chris Forbesaaa9c282016-10-03 20:01:14 +130011041 dev_data->dispatch_table.CmdEndRenderPass(commandBuffer);
Chris Forbes2886a9e2016-09-07 13:52:28 +120011042
11043 if (pCB) {
11044 lock.lock();
Chris Forbes05e03b72016-05-17 15:27:58 +120011045 TransitionFinalSubpassLayouts(dev_data, pCB, &pCB->activeRenderPassBeginInfo);
Chris Forbesc785a102016-05-17 14:59:22 +120011046 pCB->activeRenderPass = nullptr;
11047 pCB->activeSubpass = 0;
11048 pCB->activeFramebuffer = VK_NULL_HANDLE;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011049 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011050}
11051
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011052static bool logInvalidAttachmentMessage(layer_data *dev_data, VkCommandBuffer secondaryBuffer, uint32_t primaryAttach,
11053 uint32_t secondaryAttach, const char *msg) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011054 return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011055 VALIDATION_ERROR_02059, "DS",
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011056 "vkCmdExecuteCommands() called w/ invalid Secondary Cmd Buffer 0x%" PRIx64 " which has a render pass "
11057 "that is not compatible with the Primary Cmd Buffer current render pass. "
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011058 "Attachment %u is not compatible with %u: %s. %s",
11059 reinterpret_cast<uint64_t &>(secondaryBuffer), primaryAttach, secondaryAttach, msg,
11060 validation_error_map[VALIDATION_ERROR_02059]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011061}
11062
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011063static bool validateAttachmentCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11064 VkRenderPassCreateInfo const *primaryPassCI, uint32_t primaryAttach,
11065 VkCommandBuffer secondaryBuffer, VkRenderPassCreateInfo const *secondaryPassCI,
Dustin Graves8f1eab92016-04-05 09:41:17 -060011066 uint32_t secondaryAttach, bool is_multi) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011067 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011068 if (primaryPassCI->attachmentCount <= primaryAttach) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011069 primaryAttach = VK_ATTACHMENT_UNUSED;
11070 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011071 if (secondaryPassCI->attachmentCount <= secondaryAttach) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011072 secondaryAttach = VK_ATTACHMENT_UNUSED;
11073 }
11074 if (primaryAttach == VK_ATTACHMENT_UNUSED && secondaryAttach == VK_ATTACHMENT_UNUSED) {
11075 return skip_call;
11076 }
11077 if (primaryAttach == VK_ATTACHMENT_UNUSED) {
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011078 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11079 "The first is unused while the second is not.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011080 return skip_call;
11081 }
11082 if (secondaryAttach == VK_ATTACHMENT_UNUSED) {
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011083 skip_call |= logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach,
11084 "The second is unused while the first is not.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011085 return skip_call;
11086 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011087 if (primaryPassCI->pAttachments[primaryAttach].format != secondaryPassCI->pAttachments[secondaryAttach].format) {
11088 skip_call |=
11089 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different formats.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011090 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011091 if (primaryPassCI->pAttachments[primaryAttach].samples != secondaryPassCI->pAttachments[secondaryAttach].samples) {
11092 skip_call |=
11093 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different samples.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011094 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011095 if (is_multi && primaryPassCI->pAttachments[primaryAttach].flags != secondaryPassCI->pAttachments[secondaryAttach].flags) {
11096 skip_call |=
11097 logInvalidAttachmentMessage(dev_data, secondaryBuffer, primaryAttach, secondaryAttach, "They have different flags.");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011098 }
11099 return skip_call;
11100}
11101
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011102static bool validateSubpassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11103 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11104 VkRenderPassCreateInfo const *secondaryPassCI, const int subpass, bool is_multi) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011105 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011106 const VkSubpassDescription &primary_desc = primaryPassCI->pSubpasses[subpass];
11107 const VkSubpassDescription &secondary_desc = secondaryPassCI->pSubpasses[subpass];
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011108 uint32_t maxInputAttachmentCount = std::max(primary_desc.inputAttachmentCount, secondary_desc.inputAttachmentCount);
11109 for (uint32_t i = 0; i < maxInputAttachmentCount; ++i) {
11110 uint32_t primary_input_attach = VK_ATTACHMENT_UNUSED, secondary_input_attach = VK_ATTACHMENT_UNUSED;
11111 if (i < primary_desc.inputAttachmentCount) {
11112 primary_input_attach = primary_desc.pInputAttachments[i].attachment;
11113 }
11114 if (i < secondary_desc.inputAttachmentCount) {
11115 secondary_input_attach = secondary_desc.pInputAttachments[i].attachment;
11116 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011117 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_input_attach, secondaryBuffer,
11118 secondaryPassCI, secondary_input_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011119 }
11120 uint32_t maxColorAttachmentCount = std::max(primary_desc.colorAttachmentCount, secondary_desc.colorAttachmentCount);
11121 for (uint32_t i = 0; i < maxColorAttachmentCount; ++i) {
11122 uint32_t primary_color_attach = VK_ATTACHMENT_UNUSED, secondary_color_attach = VK_ATTACHMENT_UNUSED;
11123 if (i < primary_desc.colorAttachmentCount) {
11124 primary_color_attach = primary_desc.pColorAttachments[i].attachment;
11125 }
11126 if (i < secondary_desc.colorAttachmentCount) {
11127 secondary_color_attach = secondary_desc.pColorAttachments[i].attachment;
11128 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011129 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_color_attach, secondaryBuffer,
11130 secondaryPassCI, secondary_color_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011131 uint32_t primary_resolve_attach = VK_ATTACHMENT_UNUSED, secondary_resolve_attach = VK_ATTACHMENT_UNUSED;
11132 if (i < primary_desc.colorAttachmentCount && primary_desc.pResolveAttachments) {
11133 primary_resolve_attach = primary_desc.pResolveAttachments[i].attachment;
11134 }
11135 if (i < secondary_desc.colorAttachmentCount && secondary_desc.pResolveAttachments) {
11136 secondary_resolve_attach = secondary_desc.pResolveAttachments[i].attachment;
11137 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011138 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_resolve_attach,
11139 secondaryBuffer, secondaryPassCI, secondary_resolve_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011140 }
11141 uint32_t primary_depthstencil_attach = VK_ATTACHMENT_UNUSED, secondary_depthstencil_attach = VK_ATTACHMENT_UNUSED;
11142 if (primary_desc.pDepthStencilAttachment) {
11143 primary_depthstencil_attach = primary_desc.pDepthStencilAttachment[0].attachment;
11144 }
11145 if (secondary_desc.pDepthStencilAttachment) {
11146 secondary_depthstencil_attach = secondary_desc.pDepthStencilAttachment[0].attachment;
11147 }
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011148 skip_call |= validateAttachmentCompatibility(dev_data, primaryBuffer, primaryPassCI, primary_depthstencil_attach,
11149 secondaryBuffer, secondaryPassCI, secondary_depthstencil_attach, is_multi);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011150 return skip_call;
11151}
11152
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011153// Verify that given renderPass CreateInfo for primary and secondary command buffers are compatible.
11154 // This function deals directly with the CreateInfo; overloaded versions below take the renderPass handle and
11155 // feed into this function
11156static bool validateRenderPassCompatibility(layer_data *dev_data, VkCommandBuffer primaryBuffer,
11157 VkRenderPassCreateInfo const *primaryPassCI, VkCommandBuffer secondaryBuffer,
11158 VkRenderPassCreateInfo const *secondaryPassCI) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011159 bool skip_call = false;
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011160
11161 if (primaryPassCI->subpassCount != secondaryPassCI->subpassCount) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011162 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11163 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011164 "vkCmdExecuteCommands() called w/ invalid secondary Cmd Buffer 0x%" PRIx64
11165 " that has a subpassCount of %u that is incompatible with the primary Cmd Buffer 0x%" PRIx64
11166 " that has a subpassCount of %u.",
11167 reinterpret_cast<uint64_t &>(secondaryBuffer), secondaryPassCI->subpassCount,
11168 reinterpret_cast<uint64_t &>(primaryBuffer), primaryPassCI->subpassCount);
11169 } else {
11170 for (uint32_t i = 0; i < primaryPassCI->subpassCount; ++i) {
11171 skip_call |= validateSubpassCompatibility(dev_data, primaryBuffer, primaryPassCI, secondaryBuffer, secondaryPassCI, i,
11172 primaryPassCI->subpassCount > 1);
11173 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011174 }
11175 return skip_call;
11176}
11177
Dustin Graves8f1eab92016-04-05 09:41:17 -060011178static bool validateFramebuffer(layer_data *dev_data, VkCommandBuffer primaryBuffer, const GLOBAL_CB_NODE *pCB,
11179 VkCommandBuffer secondaryBuffer, const GLOBAL_CB_NODE *pSubCB) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011180 bool skip_call = false;
11181 if (!pSubCB->beginInfo.pInheritanceInfo) {
11182 return skip_call;
11183 }
Chris Forbes89ca84a2016-05-13 16:23:58 +120011184 VkFramebuffer primary_fb = pCB->activeFramebuffer;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011185 VkFramebuffer secondary_fb = pSubCB->beginInfo.pInheritanceInfo->framebuffer;
11186 if (secondary_fb != VK_NULL_HANDLE) {
11187 if (primary_fb != secondary_fb) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011188 skip_call |= log_msg(
11189 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11190 VALIDATION_ERROR_02060, "DS",
11191 "vkCmdExecuteCommands() called w/ invalid secondary command buffer 0x%" PRIx64 " which has a framebuffer 0x%" PRIx64
11192 " that is not the same as the primary command buffer's current active framebuffer 0x%" PRIx64 ". %s",
11193 reinterpret_cast<uint64_t &>(secondaryBuffer), reinterpret_cast<uint64_t &>(secondary_fb),
11194 reinterpret_cast<uint64_t &>(primary_fb), validation_error_map[VALIDATION_ERROR_02060]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011195 }
Tobin Ehlis04c04272016-10-12 11:54:09 -060011196 auto fb = getFramebufferState(dev_data, secondary_fb);
Chris Forbesb065df02016-05-17 15:45:31 +120011197 if (!fb) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011198 skip_call |=
11199 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
Mark Muelleraab36502016-05-03 13:17:29 -060011200 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11201 "which has invalid framebuffer 0x%" PRIx64 ".",
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011202 (void *)secondaryBuffer, (uint64_t)(secondary_fb));
11203 return skip_call;
11204 }
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011205 auto cb_renderpass = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011206        if (cb_renderpass && (cb_renderpass->renderPass != fb->createInfo.renderPass)) {
11207 skip_call |= validateRenderPassCompatibility(dev_data, secondaryBuffer, fb->renderPassCreateInfo.ptr(), secondaryBuffer,
Chris Forbesef730462016-09-27 12:03:31 +130011208 cb_renderpass->createInfo.ptr());
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011209 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011210 }
11211 return skip_call;
11212}
11213
Dustin Graves8f1eab92016-04-05 09:41:17 -060011214static bool validateSecondaryCommandBufferState(layer_data *dev_data, GLOBAL_CB_NODE *pCB, GLOBAL_CB_NODE *pSubCB) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011215 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011216 unordered_set<int> activeTypes;
11217 for (auto queryObject : pCB->activeQueries) {
11218 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11219 if (queryPoolData != dev_data->queryPoolMap.end()) {
11220 if (queryPoolData->second.createInfo.queryType == VK_QUERY_TYPE_PIPELINE_STATISTICS &&
11221 pSubCB->beginInfo.pInheritanceInfo) {
11222 VkQueryPipelineStatisticFlags cmdBufStatistics = pSubCB->beginInfo.pInheritanceInfo->pipelineStatistics;
11223 if ((cmdBufStatistics & queryPoolData->second.createInfo.pipelineStatistics) != cmdBufStatistics) {
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011224 skip_call |=
11225 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11226 VALIDATION_ERROR_02065, "DS", "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11227 "which has invalid active query pool 0x%" PRIx64
11228 ". Pipeline statistics is being queried so the command "
11229 "buffer must have all bits set on the queryPool. %s",
11230 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11231 validation_error_map[VALIDATION_ERROR_02065]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011232 }
11233 }
11234 activeTypes.insert(queryPoolData->second.createInfo.queryType);
11235 }
11236 }
11237 for (auto queryObject : pSubCB->startedQueries) {
11238 auto queryPoolData = dev_data->queryPoolMap.find(queryObject.pool);
11239 if (queryPoolData != dev_data->queryPoolMap.end() && activeTypes.count(queryPoolData->second.createInfo.queryType)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011240 skip_call |=
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011241 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0, __LINE__,
11242 DRAWSTATE_INVALID_SECONDARY_COMMAND_BUFFER, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011243 "vkCmdExecuteCommands() called w/ invalid Cmd Buffer 0x%p "
11244 "which has invalid active query pool 0x%" PRIx64 "of type %d but a query of that type has been started on "
11245 "secondary Cmd Buffer 0x%p.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011246 pCB->commandBuffer, reinterpret_cast<const uint64_t &>(queryPoolData->first),
11247 queryPoolData->second.createInfo.queryType, pSubCB->commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011248 }
11249 }
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -060011250
11251 auto primary_pool = getCommandPoolNode(dev_data, pCB->createInfo.commandPool);
11252 auto secondary_pool = getCommandPoolNode(dev_data, pSubCB->createInfo.commandPool);
11253 if (primary_pool && secondary_pool && (primary_pool->queueFamilyIndex != secondary_pool->queueFamilyIndex)) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011254 skip_call |=
11255 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11256 reinterpret_cast<uint64_t>(pSubCB->commandBuffer), __LINE__, DRAWSTATE_INVALID_QUEUE_FAMILY, "DS",
11257 "vkCmdExecuteCommands(): Primary command buffer 0x%p"
11258 " created in queue family %d has secondary command buffer 0x%p created in queue family %d.",
11259 pCB->commandBuffer, primary_pool->queueFamilyIndex, pSubCB->commandBuffer, secondary_pool->queueFamilyIndex);
Mark Lobodzinskieb30fd72016-08-09 16:42:24 -060011260 }
11261
Tobin Ehlisfe871282016-06-28 10:28:02 -060011262 return skip_call;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011263}
11264
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011265VKAPI_ATTR void VKAPI_CALL
11266CmdExecuteCommands(VkCommandBuffer commandBuffer, uint32_t commandBuffersCount, const VkCommandBuffer *pCommandBuffers) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011267 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011268 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011269 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011270 GLOBAL_CB_NODE *pCB = getCBNode(dev_data, commandBuffer);
11271 if (pCB) {
11272 GLOBAL_CB_NODE *pSubCB = NULL;
11273 for (uint32_t i = 0; i < commandBuffersCount; i++) {
11274 pSubCB = getCBNode(dev_data, pCommandBuffers[i]);
Tobin Ehlis44ba5fc2017-01-03 14:07:17 -070011275 assert(pSubCB);
11276 if (VK_COMMAND_BUFFER_LEVEL_PRIMARY == pSubCB->createInfo.level) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011277 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
Mike Weiblen25c90822016-12-06 17:10:22 -070011278 __LINE__, VALIDATION_ERROR_00153, "DS",
Tobin Ehlisfe871282016-06-28 10:28:02 -060011279 "vkCmdExecuteCommands() called w/ Primary Cmd Buffer 0x%p in element %u of pCommandBuffers "
Mike Weiblen25c90822016-12-06 17:10:22 -070011280 "array. All cmd buffers in pCommandBuffers array must be secondary. %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011281 pCommandBuffers[i], i, validation_error_map[VALIDATION_ERROR_00153]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011282 } else if (pCB->activeRenderPass) { // Secondary CB w/i RenderPass must have *CONTINUE_BIT set
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011283 auto secondary_rp_state = getRenderPassState(dev_data, pSubCB->beginInfo.pInheritanceInfo->renderPass);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011284 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011285 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011286 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
Mike Weiblen25c90822016-12-06 17:10:22 -070011287 (uint64_t)pCommandBuffers[i], __LINE__, VALIDATION_ERROR_02057, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011288 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) executed within render pass (0x%" PRIxLEAST64
Mike Weiblen25c90822016-12-06 17:10:22 -070011289 ") must have had vkBeginCommandBuffer() called w/ VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT set. %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011290 pCommandBuffers[i], (uint64_t)pCB->activeRenderPass->renderPass,
Mike Weiblen25c90822016-12-06 17:10:22 -070011291 validation_error_map[VALIDATION_ERROR_02057]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011292 } else {
11293 // Make sure render pass is compatible with parent command buffer pass if has continue
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011294 if (pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) {
Chris Forbesef730462016-09-27 12:03:31 +130011295 skip_call |=
11296 validateRenderPassCompatibility(dev_data, commandBuffer, pCB->activeRenderPass->createInfo.ptr(),
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011297 pCommandBuffers[i], secondary_rp_state->createInfo.ptr());
Tobin Ehlis4ca15c72016-06-30 09:29:18 -060011298 }
Tobin Ehlisf77f5cc2016-07-19 10:45:24 -060011299 // If framebuffer for secondary CB is not NULL, then it must match active FB from primaryCB
Tobin Ehlisfe871282016-06-28 10:28:02 -060011300 skip_call |= validateFramebuffer(dev_data, commandBuffer, pCB, pCommandBuffers[i], pSubCB);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011301 }
11302 string errorString = "";
Tobin Ehlisf77f5cc2016-07-19 10:45:24 -060011303 // secondaryCB must have been created w/ RP compatible w/ primaryCB active renderpass
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011304 if ((pCB->activeRenderPass->renderPass != secondary_rp_state->renderPass) &&
Chris Forbesef730462016-09-27 12:03:31 +130011305 !verify_renderpass_compatibility(dev_data, pCB->activeRenderPass->createInfo.ptr(),
Tobin Ehlis95ccf3e2016-10-12 15:24:03 -060011306 secondary_rp_state->createInfo.ptr(), errorString)) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060011307 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011308 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11309 (uint64_t)pCommandBuffers[i], __LINE__, DRAWSTATE_RENDERPASS_INCOMPATIBLE, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011310 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) w/ render pass (0x%" PRIxLEAST64
11311 ") is incompatible w/ primary command buffer (0x%p) w/ render pass (0x%" PRIxLEAST64 ") due to: %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011312 pCommandBuffers[i], (uint64_t)pSubCB->beginInfo.pInheritanceInfo->renderPass, commandBuffer,
Chris Forbesa4937a72016-05-06 16:31:14 +120011313 (uint64_t)pCB->activeRenderPass->renderPass, errorString.c_str());
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011314 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011315 }
11316 // TODO(mlentine): Move more logic into this method
Tobin Ehlisfe871282016-06-28 10:28:02 -060011317 skip_call |= validateSecondaryCommandBufferState(dev_data, pCB, pSubCB);
Tobin Ehlisf7cf9152016-09-27 13:10:33 -060011318 skip_call |= validateCommandBufferState(dev_data, pSubCB, "vkCmdExecuteCommands()");
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011319 // Secondary cmdBuffers are considered pending execution from the moment
11320 // they are recorded
11321 if (!(pSubCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT)) {
11322 if (dev_data->globalInFlightCmdBuffers.find(pSubCB->commandBuffer) != dev_data->globalInFlightCmdBuffers.end()) {
Mike Weiblen25c90822016-12-06 17:10:22 -070011323 skip_call |=
11324 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11325 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, (uint64_t)(pCB->commandBuffer), __LINE__,
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011326 VALIDATION_ERROR_00154, "DS", "Attempt to simultaneously execute command buffer 0x%p"
Mike Weiblen25c90822016-12-06 17:10:22 -070011327 " without VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set! %s",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011328 pCB->commandBuffer, validation_error_map[VALIDATION_ERROR_00154]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011329 }
11330 if (pCB->beginInfo.flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT) {
11331 // Warn that non-simultaneous secondary cmd buffer renders primary non-simultaneous
Tobin Ehlisfe871282016-06-28 10:28:02 -060011332 skip_call |= log_msg(
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011333 dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
11334 (uint64_t)(pCommandBuffers[i]), __LINE__, DRAWSTATE_INVALID_CB_SIMULTANEOUS_USE, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011335 "vkCmdExecuteCommands(): Secondary Command Buffer (0x%p) "
11336 "does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT set and will cause primary command buffer "
11337 "(0x%p) to be treated as if it does not have VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT "
Tobin Ehlisfe871282016-06-28 10:28:02 -060011338 "set, even though it does.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011339 pCommandBuffers[i], pCB->commandBuffer);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011340 pCB->beginInfo.flags &= ~VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT;
11341 }
11342 }
Chris Forbes94c5f532016-10-03 17:42:38 +130011343 if (!pCB->activeQueries.empty() && !dev_data->enabled_features.inheritedQueries) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011344 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11345 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, reinterpret_cast<uint64_t>(pCommandBuffers[i]),
11346 __LINE__, VALIDATION_ERROR_02062, "DS", "vkCmdExecuteCommands(): Secondary Command Buffer "
11347 "(0x%p) cannot be submitted with a query in "
11348 "flight and inherited queries not "
11349 "supported on this device. %s",
11350 pCommandBuffers[i], validation_error_map[VALIDATION_ERROR_02062]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011351 }
Tobin Ehlisd6280b12016-11-03 10:46:44 -060011352 // Propagate layout transitions to the primary cmd buffer
11353 for (auto ilm_entry : pSubCB->imageLayoutMap) {
11354 SetLayout(pCB, ilm_entry.first, ilm_entry.second);
11355 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011356 pSubCB->primaryCommandBuffer = pCB->commandBuffer;
11357 pCB->secondaryCommandBuffers.insert(pSubCB->commandBuffer);
11358 dev_data->globalInFlightCmdBuffers.insert(pSubCB->commandBuffer);
Michael Lentinef01fb382016-07-21 17:24:56 -050011359 for (auto &function : pSubCB->queryUpdates) {
11360 pCB->queryUpdates.push_back(function);
11361 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011362 }
Mike Schuchardt8fb38062016-12-08 15:36:24 -070011363 skip_call |= validatePrimaryCommandBuffer(dev_data, pCB, "vkCmdExecuteCommands", VALIDATION_ERROR_00163);
Tobin Ehlis1c883a02016-12-19 15:59:16 -070011364 skip_call |= ValidateCmd(dev_data, pCB, CMD_EXECUTECOMMANDS, "vkCmdExecuteCommands()");
11365 UpdateCmdBufferLastCmd(dev_data, pCB, CMD_EXECUTECOMMANDS);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011366 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011367 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011368 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +130011369 dev_data->dispatch_table.CmdExecuteCommands(commandBuffer, commandBuffersCount, pCommandBuffers);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011370}
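// A sketch (placeholder handles) of recording a secondary command buffer that passes the
// render-pass checks above when executed inside an active render pass: the inheritance info
// names a compatible render pass and the CONTINUE bit is set.
//
//   VkCommandBufferInheritanceInfo inherit = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO};
//   inherit.renderPass = render_pass;    // must be compatible with the primary's active pass
//   inherit.subpass = 0;
//   inherit.framebuffer = framebuffer;   // or VK_NULL_HANDLE if not yet known
//   VkCommandBufferBeginInfo begin = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO};
//   begin.flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
//   begin.pInheritanceInfo = &inherit;
//   vkBeginCommandBuffer(secondary_cmd_buf, &begin);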
11371
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011372// For any image objects that overlap mapped memory, verify that their layouts are PREINIT or GENERAL
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011373static bool ValidateMapImageLayouts(VkDevice device, DEVICE_MEM_INFO const *mem_info, VkDeviceSize offset,
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011374 VkDeviceSize end_offset) {
Dustin Graves8f1eab92016-04-05 09:41:17 -060011375 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011376 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011377 // Iterate over all bound image ranges and verify that for any that overlap the
11378 // map ranges, the layouts are VK_IMAGE_LAYOUT_PREINITIALIZED or VK_IMAGE_LAYOUT_GENERAL
11379 // TODO : This can be optimized if we store ranges based on starting address and early exit when we pass our range
11380 for (auto image_handle : mem_info->bound_images) {
11381 auto img_it = mem_info->bound_ranges.find(image_handle);
11382 if (img_it != mem_info->bound_ranges.end()) {
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011383 if (rangesIntersect(dev_data, &img_it->second, offset, end_offset)) {
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011384 std::vector<VkImageLayout> layouts;
Tobin Ehlis12a4b5e2016-08-08 12:33:11 -060011385 if (FindLayouts(dev_data, VkImage(image_handle), layouts)) {
Tobin Ehlis9e40f0d2016-08-04 10:17:19 -060011386 for (auto layout : layouts) {
11387 if (layout != VK_IMAGE_LAYOUT_PREINITIALIZED && layout != VK_IMAGE_LAYOUT_GENERAL) {
11388 skip_call |=
11389 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, 0,
11390 __LINE__, DRAWSTATE_INVALID_IMAGE_LAYOUT, "DS", "Cannot map an image with layout %s. Only "
11391 "GENERAL or PREINITIALIZED are supported.",
11392 string_VkImageLayout(layout));
11393 }
11394 }
11395 }
11396 }
11397 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011398 }
11399 return skip_call;
11400}
11401
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011402VKAPI_ATTR VkResult VKAPI_CALL
11403MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags, void **ppData) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011404 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11405
Dustin Graves8f1eab92016-04-05 09:41:17 -060011406 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011407 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011408 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011409 DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
11410 if (mem_info) {
Tobin Ehlisc3e9c7b2016-08-10 17:00:51 -060011411 // TODO : This could be more fine-grained to track just the region that is valid
11412 mem_info->global_valid = true;
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011413 auto end_offset = (VK_WHOLE_SIZE == size) ? mem_info->alloc_info.allocationSize - 1 : offset + size - 1;
11414 skip_call |= ValidateMapImageLayouts(device, mem_info, offset, end_offset);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011415 // TODO : Do we need to create new "bound_range" for the mapped range?
Tobin Ehlis0a78ef92016-08-12 14:12:44 -060011416 SetMemRangesValid(dev_data, mem_info, offset, end_offset);
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011417 if ((dev_data->phys_dev_mem_props.memoryTypes[mem_info->alloc_info.memoryTypeIndex].propertyFlags &
Tobin Ehlise54be7b2016-04-11 14:49:55 -060011418 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) {
Mike Weiblend3fb3132016-12-06 10:28:00 -070011419 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11420 (uint64_t)mem, __LINE__, VALIDATION_ERROR_00629, "MEM",
11421 "Mapping Memory without VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set: mem obj 0x%" PRIxLEAST64 ". %s",
11422 (uint64_t)mem, validation_error_map[VALIDATION_ERROR_00629]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011423 }
11424 }
Tobin Ehlisb495d5f2016-08-04 09:33:02 -060011425 skip_call |= ValidateMapMemRange(dev_data, mem, offset, size);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011426 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011427
Dustin Graves8f1eab92016-04-05 09:41:17 -060011428 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011429 result = dev_data->dispatch_table.MapMemory(device, mem, offset, size, flags, ppData);
Tobin Ehlisd094c272016-05-12 08:31:32 -060011430 if (VK_SUCCESS == result) {
Tobin Ehlisd094c272016-05-12 08:31:32 -060011431 lock.lock();
Tobin Ehlis3d2c3162016-08-10 16:08:00 -060011432 // TODO : What's the point of this range? See comment on creating new "bound_range" above, which may replace this
Tobin Ehlisd094c272016-05-12 08:31:32 -060011433 storeMemRanges(dev_data, mem, offset, size);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011434 initializeAndTrackMemory(dev_data, mem, offset, size, ppData);
Tobin Ehlisd094c272016-05-12 08:31:32 -060011435 lock.unlock();
Tobin Ehlisd094c272016-05-12 08:31:32 -060011436 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011437 }
11438 return result;
11439}
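// App-side sketch (placeholder handles) of the mapping contract validated above: the memory
// type must include VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT and (offset, size) must lie within
// the allocation:
//
//   void *ptr = nullptr;
//   vkMapMemory(device, mem, 0, VK_WHOLE_SIZE, 0, &ptr);
//   memcpy(ptr, src_data, src_bytes);   // write through the returned pointer
//   vkUnmapMemory(device, mem);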
11440
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011441VKAPI_ATTR void VKAPI_CALL UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011442 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -060011443 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011444
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011445 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbes3dd83742016-10-03 19:35:49 +130011446 skip_call |= deleteMemRanges(dev_data, mem);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011447 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011448 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011449 dev_data->dispatch_table.UnmapMemory(device, mem);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011450 }
11451}
11452
Chris Forbes3dd83742016-10-03 19:35:49 +130011453static bool validateMemoryIsMapped(layer_data *dev_data, const char *funcName, uint32_t memRangeCount,
Dustin Graves8f1eab92016-04-05 09:41:17 -060011454 const VkMappedMemoryRange *pMemRanges) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011455 bool skip = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011456 for (uint32_t i = 0; i < memRangeCount; ++i) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011457 auto mem_info = getMemObjInfo(dev_data, pMemRanges[i].memory);
Tobin Ehlis997b2582016-06-02 08:43:37 -060011458 if (mem_info) {
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011459 if (pMemRanges[i].size == VK_WHOLE_SIZE) {
11460 if (mem_info->mem_range.offset > pMemRanges[i].offset) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011461 skip |=
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011462 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11463 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00643, "MEM",
11464 "%s: Flush/Invalidate offset (" PRINTF_SIZE_T_SPECIFIER ") is less than Memory Object's offset "
11465 "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11466 funcName, static_cast<size_t>(pMemRanges[i].offset),
11467 static_cast<size_t>(mem_info->mem_range.offset), validation_error_map[VALIDATION_ERROR_00643]);
11468 }
11469 } else {
11470 const uint64_t data_end = (mem_info->mem_range.size == VK_WHOLE_SIZE)
11471 ? mem_info->alloc_info.allocationSize
11472 : (mem_info->mem_range.offset + mem_info->mem_range.size);
11473 if ((mem_info->mem_range.offset > pMemRanges[i].offset) ||
11474 (data_end < (pMemRanges[i].offset + pMemRanges[i].size))) {
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011475 skip |=
Mark Lobodzinskib3c675e2016-11-15 08:56:03 -070011476 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
11477 (uint64_t)pMemRanges[i].memory, __LINE__, VALIDATION_ERROR_00642, "MEM",
11478 "%s: Flush/Invalidate size or offset (" PRINTF_SIZE_T_SPECIFIER ", " PRINTF_SIZE_T_SPECIFIER
11479 ") exceed the Memory Object's upper-bound "
11480 "(" PRINTF_SIZE_T_SPECIFIER "). %s",
11481 funcName, static_cast<size_t>(pMemRanges[i].offset + pMemRanges[i].size),
11482 static_cast<size_t>(pMemRanges[i].offset), static_cast<size_t>(data_end),
11483 validation_error_map[VALIDATION_ERROR_00642]);
11484 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011485 }
11486 }
11487 }
Mark Lobodzinski4a95cfe2016-11-15 13:55:22 -070011488 return skip;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011489}
11490
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011491static bool ValidateAndCopyNoncoherentMemoryToDriver(layer_data *dev_data, uint32_t mem_range_count,
11492 const VkMappedMemoryRange *mem_ranges) {
11493 bool skip = false;
11494 for (uint32_t i = 0; i < mem_range_count; ++i) {
11495 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
Tobin Ehlis997b2582016-06-02 08:43:37 -060011496 if (mem_info) {
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011497 if (mem_info->shadow_copy) {
11498 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11499 ? mem_info->mem_range.size
Tobin Ehlis968f5dd2016-10-05 07:50:25 -060011500 : (mem_info->alloc_info.allocationSize - mem_info->mem_range.offset);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011501 char *data = static_cast<char *>(mem_info->shadow_copy);
11502 for (uint64_t j = 0; j < mem_info->shadow_pad_size; ++j) {
11503 if (data[j] != NoncoherentMemoryFillValue) {
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011504 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11505 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11506 MEMTRACK_INVALID_MAP, "MEM", "Memory underflow was detected on mem obj 0x%" PRIxLEAST64,
11507 (uint64_t)mem_ranges[i].memory);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011508 }
11509 }
11510 for (uint64_t j = (size + mem_info->shadow_pad_size); j < (2 * mem_info->shadow_pad_size + size); ++j) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011511 if (data[j] != NoncoherentMemoryFillValue) {
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011512 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
11513 VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem_ranges[i].memory, __LINE__,
11514 MEMTRACK_INVALID_MAP, "MEM", "Memory overflow was detected on mem obj 0x%" PRIxLEAST64,
11515 (uint64_t)mem_ranges[i].memory);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011516 }
11517 }
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011518 memcpy(mem_info->p_driver_data, static_cast<void *>(data + mem_info->shadow_pad_size), (size_t)(size));
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011519 }
11520 }
11521 }
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011522 return skip;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011523}
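// Shadow-copy layout assumed by the guard-byte checks above (a sketch of the tracking scheme):
//
//   [ shadow_pad_size guard bytes ][ mapped data, size bytes ][ shadow_pad_size guard bytes ]
//
// Guard bytes are filled with NoncoherentMemoryFillValue when the mapping is created; any
// other value found there at flush time means the app wrote outside its mapped range
// (underflow before the data, overflow after it).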
11524
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011525static void CopyNoncoherentMemoryFromDriver(layer_data *dev_data, uint32_t mem_range_count, const VkMappedMemoryRange *mem_ranges) {
11526 for (uint32_t i = 0; i < mem_range_count; ++i) {
Chris Forbes3dd83742016-10-03 19:35:49 +130011527 auto mem_info = getMemObjInfo(dev_data, mem_ranges[i].memory);
Mark Lobodzinski066b8422016-08-15 14:27:26 -060011528 if (mem_info && mem_info->shadow_copy) {
11529 VkDeviceSize size = (mem_info->mem_range.size != VK_WHOLE_SIZE)
11530 ? mem_info->mem_range.size
11531 : (mem_info->alloc_info.allocationSize - mem_ranges[i].offset);
11532 char *data = static_cast<char *>(mem_info->shadow_copy);
11533 memcpy(data + mem_info->shadow_pad_size, mem_info->p_driver_data, (size_t)(size));
Mark Lobodzinskibc4d6202016-08-16 09:06:15 -060011534 }
11535 }
11536}
11537
Mark Lobodzinskidcefe7f2016-11-14 16:28:01 -070011538static bool ValidateMappedMemoryRangeDeviceLimits(layer_data *dev_data, const char *func_name, uint32_t mem_range_count,
11539 const VkMappedMemoryRange *mem_ranges) {
11540 bool skip = false;
11541 for (uint32_t i = 0; i < mem_range_count; ++i) {
11542 uint64_t atom_size = dev_data->phys_dev_properties.properties.limits.nonCoherentAtomSize;
11543 if (vk_safe_modulo(mem_ranges[i].offset, atom_size) != 0) {
11544 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11545 __LINE__, VALIDATION_ERROR_00644, "MEM",
11546 "%s: Offset in pMemRanges[%d] is 0x%" PRIxLEAST64
11547 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11548 func_name, i, mem_ranges[i].offset, atom_size, validation_error_map[VALIDATION_ERROR_00644]);
11549 }
11550 if ((mem_ranges[i].size != VK_WHOLE_SIZE) && (vk_safe_modulo(mem_ranges[i].size, atom_size) != 0)) {
11551 skip |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
11552 __LINE__, VALIDATION_ERROR_00645, "MEM",
11553 "%s: Size in pMemRanges[%d] is 0x%" PRIxLEAST64
11554 ", which is not a multiple of VkPhysicalDeviceLimits::nonCoherentAtomSize (0x%" PRIxLEAST64 "). %s",
11555 func_name, i, mem_ranges[i].size, atom_size, validation_error_map[VALIDATION_ERROR_00645]);
11556 }
11557 }
11558 return skip;
11559}
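// Illustrative numbers (hypothetical device): with nonCoherentAtomSize == 64, flushing 100
// bytes written at offset 0 requires rounding the size up to a multiple of the atom size
// (or passing VK_WHOLE_SIZE):
//
//   VkMappedMemoryRange range = {VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE};
//   range.memory = mem;
//   range.offset = 0;     // multiple of nonCoherentAtomSize
//   range.size = 128;     // 100 rounded up to a multiple of 64
//   vkFlushMappedMemoryRanges(device, 1, &range);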
11560
Mark Lobodzinski16ae4402016-11-15 07:59:58 -070011561static bool PreCallValidateFlushMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11562 const VkMappedMemoryRange *mem_ranges) {
11563 bool skip = false;
11564 std::lock_guard<std::mutex> lock(global_lock);
11565 skip |= ValidateAndCopyNoncoherentMemoryToDriver(dev_data, mem_range_count, mem_ranges);
11566 skip |= validateMemoryIsMapped(dev_data, "vkFlushMappedMemoryRanges", mem_range_count, mem_ranges);
11567 return skip;
11568}
11569
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011570VKAPI_ATTR VkResult VKAPI_CALL FlushMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11571 const VkMappedMemoryRange *pMemRanges) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011572 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbes3dd83742016-10-03 19:35:49 +130011573 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011574
Mark Lobodzinski16ae4402016-11-15 07:59:58 -070011575 if (!PreCallValidateFlushMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011576 result = dev_data->dispatch_table.FlushMappedMemoryRanges(device, memRangeCount, pMemRanges);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011577 }
11578 return result;
11579}
11580
Mark Lobodzinski16ae4402016-11-15 07:59:58 -070011581static bool PreCallValidateInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11582 const VkMappedMemoryRange *mem_ranges) {
11583 bool skip = false;
11584 std::lock_guard<std::mutex> lock(global_lock);
11585 skip |= validateMemoryIsMapped(dev_data, "vkInvalidateMappedMemoryRanges", mem_range_count, mem_ranges);
11586 return skip;
11587}
11588
11589static void PostCallRecordInvalidateMappedMemoryRanges(layer_data *dev_data, uint32_t mem_range_count,
11590 const VkMappedMemoryRange *mem_ranges) {
11591 std::lock_guard<std::mutex> lock(global_lock);
11592 // Update our shadow copy with modified driver data
11593 CopyNoncoherentMemoryFromDriver(dev_data, mem_range_count, mem_ranges);
11594}
11595
Mark Lobodzinski945c6872016-11-15 07:48:49 -070011596VKAPI_ATTR VkResult VKAPI_CALL InvalidateMappedMemoryRanges(VkDevice device, uint32_t memRangeCount,
11597 const VkMappedMemoryRange *pMemRanges) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011598 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Chris Forbes3dd83742016-10-03 19:35:49 +130011599 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011600
Mark Lobodzinski16ae4402016-11-15 07:59:58 -070011601 if (!PreCallValidateInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges)) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011602 result = dev_data->dispatch_table.InvalidateMappedMemoryRanges(device, memRangeCount, pMemRanges);
Mark Lobodzinski16ae4402016-11-15 07:59:58 -070011603 if (result == VK_SUCCESS) {
11604 PostCallRecordInvalidateMappedMemoryRanges(dev_data, memRangeCount, pMemRanges);
11605 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011606 }
11607 return result;
11608}
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011609
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011610VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory(VkDevice device, VkImage image, VkDeviceMemory mem, VkDeviceSize memoryOffset) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011611 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
11612 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Tobin Ehlisfe871282016-06-28 10:28:02 -060011613 bool skip_call = false;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011614 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011615 auto image_state = getImageState(dev_data, image);
11616 if (image_state) {
Tobin Ehlisf263ba42016-04-05 13:33:00 -060011617 // Track objects tied to memory
Mark Lobodzinskif2904db2016-05-03 15:31:26 -060011618 uint64_t image_handle = reinterpret_cast<uint64_t &>(image);
Tobin Ehlis4ff58172016-09-22 10:52:00 -060011619 skip_call = SetMemBinding(dev_data, mem, image_handle, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, "vkBindImageMemory");
Tobin Ehlis9cb84402016-11-17 13:51:54 -070011620 if (!image_state->memory_requirements_checked) {
11621 // The spec has no explicit requirement to call vkGetImageMemoryRequirements() prior to calling
11622 // vkBindImageMemory(), but it is implied: the memory being bound must conform to the VkMemoryRequirements
11623 // returned by vkGetImageMemoryRequirements()
11624 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11625 image_handle, __LINE__, DRAWSTATE_INVALID_IMAGE, "DS",
Mark Lobodzinskifa572262016-11-22 15:29:38 -070011626 "vkBindImageMemory(): Binding memory to image 0x%" PRIxLEAST64
Tobin Ehlis9cb84402016-11-17 13:51:54 -070011627 " but vkGetImageMemoryRequirements() has not been called on that image.",
11628 image_handle);
11629 // Make the call for them so we can verify the state
11630 lock.unlock();
11631 dev_data->dispatch_table.GetImageMemoryRequirements(device, image, &image_state->requirements);
11632 lock.lock();
11633 }
Mark Lobodzinskif2904db2016-05-03 15:31:26 -060011634
11635 // Track and validate bound memory range information
Tobin Ehlis997b2582016-06-02 08:43:37 -060011636 auto mem_info = getMemObjInfo(dev_data, mem);
11637 if (mem_info) {
Tobin Ehlis9cb84402016-11-17 13:51:54 -070011638 skip_call |= InsertImageMemoryRange(dev_data, image, mem_info, memoryOffset, image_state->requirements,
Tobin Ehlis30df15c2016-10-12 17:17:57 -060011639 image_state->createInfo.tiling == VK_IMAGE_TILING_LINEAR);
Mike Weiblendcca8592016-12-15 12:24:24 -070011640 skip_call |= ValidateMemoryTypes(dev_data, mem_info, image_state->requirements.memoryTypeBits, "vkBindImageMemory()",
11641 VALIDATION_ERROR_00806);
Mark Lobodzinskif2904db2016-05-03 15:31:26 -060011642 }
11643
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011644 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060011645 if (!skip_call) {
Chris Forbesaaa9c282016-10-03 20:01:14 +130011646 result = dev_data->dispatch_table.BindImageMemory(device, image, mem, memoryOffset);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011647 lock.lock();
Tobin Ehlis54108272016-10-11 14:26:49 -060011648 image_state->binding.mem = mem;
11649 image_state->binding.offset = memoryOffset;
Tobin Ehlis9cb84402016-11-17 13:51:54 -070011650 image_state->binding.size = image_state->requirements.size;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011651 lock.unlock();
Tobin Ehlisf263ba42016-04-05 13:33:00 -060011652 }
11653 } else {
11654 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11655 reinterpret_cast<const uint64_t &>(image), __LINE__, MEMTRACK_INVALID_OBJECT, "MT",
Mark Muelleraab36502016-05-03 13:17:29 -060011656 "vkBindImageMemory: Cannot find image 0x%" PRIx64 "; has it already been destroyed?",
Tobin Ehlisf263ba42016-04-05 13:33:00 -060011657 reinterpret_cast<const uint64_t &>(image));
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011658 }
11659 return result;
11660}
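
// Usage sketch (illustrative only): the call order the warning above steers
// applications toward: query requirements first, then allocate and bind.
// 'device' and 'image' are assumed valid; FindMemoryType() is a hypothetical
// helper that picks an index from memoryTypeBits plus desired property flags.
#if 0
    VkMemoryRequirements mem_reqs;
    vkGetImageMemoryRequirements(device, image, &mem_reqs);

    VkMemoryAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    alloc_info.allocationSize = mem_reqs.size;
    alloc_info.memoryTypeIndex = FindMemoryType(mem_reqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

    VkDeviceMemory memory;
    vkAllocateMemory(device, &alloc_info, nullptr, &memory);
    vkBindImageMemory(device, image, memory, 0);  // offset 0 trivially satisfies mem_reqs.alignment
#endif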
11661
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011662VKAPI_ATTR VkResult VKAPI_CALL SetEvent(VkDevice device, VkEvent event) {
Tobin Ehlis3f9e8f92016-04-13 16:18:28 -060011663 bool skip_call = false;
11664 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011665 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011666 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis1af17132016-10-20 14:17:21 -060011667 auto event_state = getEventNode(dev_data, event);
11668 if (event_state) {
11669 event_state->needsSignaled = false;
11670 event_state->stageMask = VK_PIPELINE_STAGE_HOST_BIT;
11671 if (event_state->write_in_use) {
Tobin Ehlis3f9e8f92016-04-13 16:18:28 -060011672 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT,
11673 reinterpret_cast<const uint64_t &>(event), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
Mark Muelleraab36502016-05-03 13:17:29 -060011674 "Cannot call vkSetEvent() on event 0x%" PRIxLEAST64 " that is already in use by a command buffer.",
Tobin Ehlis3f9e8f92016-04-13 16:18:28 -060011675 reinterpret_cast<const uint64_t &>(event));
11676 }
11677 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011678 lock.unlock();
Tobin Ehlise6ab6932016-04-07 11:35:46 -060011679 // A host signal of an event is immediately visible to all queues, so update stageMask for any queue that has seen this event
11680 // TODO : For correctness this needs separate fix to verify that app doesn't make incorrect assumptions about the
11681 // ordering of this command in relation to vkCmd[Set|Reset]Events (see GH297)
11682 for (auto queue_data : dev_data->queueMap) {
11683 auto event_entry = queue_data.second.eventToStageMap.find(event);
11684 if (event_entry != queue_data.second.eventToStageMap.end()) {
11685 event_entry->second |= VK_PIPELINE_STAGE_HOST_BIT;
11686 }
11687 }
Tobin Ehlis3f9e8f92016-04-13 16:18:28 -060011688 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +130011689 result = dev_data->dispatch_table.SetEvent(device, event);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011690 return result;
11691}
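
// Usage sketch (illustrative only): host-side signaling paired with a
// device-side wait, which is what the stageMask bookkeeping above models.
// 'device', 'event', and 'cmd_buffer' are assumed valid, with the command
// buffer in the recording state.
#if 0
    // Recorded into the command buffer: the GPU waits until the host signals.
    vkCmdWaitEvents(cmd_buffer, 1, &event,
                    VK_PIPELINE_STAGE_HOST_BIT,         // srcStageMask: event is signaled from the host
                    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,  // dstStageMask: subsequent commands wait here
                    0, nullptr, 0, nullptr, 0, nullptr);
    // Later, on the host:
    vkSetEvent(device, event);
#endif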
11692
11693VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011694QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011695 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
11696 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Dustin Graves8f1eab92016-04-05 09:41:17 -060011697 bool skip_call = false;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011698 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbesd73299b2016-06-10 15:25:45 +120011699 auto pFence = getFenceNode(dev_data, fence);
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -070011700 auto pQueue = getQueueState(dev_data, queue);
Chris Forbesd73299b2016-06-10 15:25:45 +120011701
Tobin Ehlisb13975e2016-04-14 07:02:43 -060011702 // First verify that fence is not in use
Chris Forbesd73299b2016-06-10 15:25:45 +120011703 skip_call |= ValidateFenceForSubmit(dev_data, pFence);
11704
Chris Forbes8320a8d2016-08-01 15:15:30 +120011705 if (pFence) {
11706 SubmitFence(pQueue, pFence, bindInfoCount);
Tobin Ehlisb13975e2016-04-14 07:02:43 -060011707 }
Chris Forbesd73299b2016-06-10 15:25:45 +120011708
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011709 for (uint32_t bindIdx = 0; bindIdx < bindInfoCount; ++bindIdx) {
11710 const VkBindSparseInfo &bindInfo = pBindInfo[bindIdx];
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011711 // Track objects tied to memory
11712 for (uint32_t j = 0; j < bindInfo.bufferBindCount; j++) {
11713 for (uint32_t k = 0; k < bindInfo.pBufferBinds[j].bindCount; k++) {
Tobin Ehlise89829a2016-10-11 17:29:32 -060011714 auto sparse_binding = bindInfo.pBufferBinds[j].pBinds[k];
11715 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11716 (uint64_t)bindInfo.pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
11717 "vkQueueBindSparse"))
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011718 skip_call = true;
11719 }
11720 }
11721 for (uint32_t j = 0; j < bindInfo.imageOpaqueBindCount; j++) {
11722 for (uint32_t k = 0; k < bindInfo.pImageOpaqueBinds[j].bindCount; k++) {
Tobin Ehlise89829a2016-10-11 17:29:32 -060011723 auto sparse_binding = bindInfo.pImageOpaqueBinds[j].pBinds[k];
11724 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, sparse_binding.size},
11725 (uint64_t)bindInfo.pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11726 "vkQueueBindSparse"))
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011727 skip_call = true;
11728 }
11729 }
11730 for (uint32_t j = 0; j < bindInfo.imageBindCount; j++) {
11731 for (uint32_t k = 0; k < bindInfo.pImageBinds[j].bindCount; k++) {
Tobin Ehlise89829a2016-10-11 17:29:32 -060011732 auto sparse_binding = bindInfo.pImageBinds[j].pBinds[k];
11733 // TODO: This size is broken for non-opaque bindings, need to update to comprehend full sparse binding data
11734 VkDeviceSize size = sparse_binding.extent.depth * sparse_binding.extent.height * sparse_binding.extent.width * 4;
11735 if (SetSparseMemBinding(dev_data, {sparse_binding.memory, sparse_binding.memoryOffset, size},
11736 (uint64_t)bindInfo.pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
11737 "vkQueueBindSparse"))
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011738 skip_call = true;
11739 }
11740 }
Chris Forbes8320a8d2016-08-01 15:15:30 +120011741
11742 std::vector<SEMAPHORE_WAIT> semaphore_waits;
11743 std::vector<VkSemaphore> semaphore_signals;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011744 for (uint32_t i = 0; i < bindInfo.waitSemaphoreCount; ++i) {
Chris Forbes220fd472016-06-21 18:59:28 +120011745 VkSemaphore semaphore = bindInfo.pWaitSemaphores[i];
11746 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11747 if (pSemaphore) {
11748 if (pSemaphore->signaled) {
Chris Forbes8320a8d2016-08-01 15:15:30 +120011749 if (pSemaphore->signaler.first != VK_NULL_HANDLE) {
11750 semaphore_waits.push_back({semaphore, pSemaphore->signaler.first, pSemaphore->signaler.second});
11751 pSemaphore->in_use.fetch_add(1);
11752 }
11753 pSemaphore->signaler.first = VK_NULL_HANDLE;
Chris Forbes220fd472016-06-21 18:59:28 +120011754 pSemaphore->signaled = false;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011755 } else {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011756 skip_call |= log_msg(
11757 dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11758 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
11759 "vkQueueBindSparse: Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.",
11760 queue, reinterpret_cast<const uint64_t &>(semaphore));
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011761 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011762 }
11763 }
11764 for (uint32_t i = 0; i < bindInfo.signalSemaphoreCount; ++i) {
Chris Forbes220fd472016-06-21 18:59:28 +120011765 VkSemaphore semaphore = bindInfo.pSignalSemaphores[i];
11766 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
11767 if (pSemaphore) {
11768 if (pSemaphore->signaled) {
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011769 skip_call =
11770 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
11771 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS, "DS",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011772 "vkQueueBindSparse: Queue 0x%p is signaling semaphore 0x%" PRIx64
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011773 ", but that semaphore is already signaled.",
Tobin Ehlis50b6c172016-12-22 10:42:36 -070011774 queue, reinterpret_cast<const uint64_t &>(semaphore));
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011775 } else {
11777 pSemaphore->signaler.first = queue;
11778 pSemaphore->signaler.second = pQueue->seq + pQueue->submissions.size() + 1;
11779 pSemaphore->signaled = true;
11780 pSemaphore->in_use.fetch_add(1);
11781 semaphore_signals.push_back(semaphore);
11782 }
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011783 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011784 }
Chris Forbes8320a8d2016-08-01 15:15:30 +120011785
11786 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11787 semaphore_waits,
11788 semaphore_signals,
11789 bindIdx == bindInfoCount - 1 ? fence : VK_NULL_HANDLE);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011790 }
Chris Forbes8320a8d2016-08-01 15:15:30 +120011791
11792 if (pFence && !bindInfoCount) {
11793 // No work to do, just dropping a fence in the queue by itself.
11794 pQueue->submissions.emplace_back(std::vector<VkCommandBuffer>(),
11795 std::vector<SEMAPHORE_WAIT>(),
11796 std::vector<VkSemaphore>(),
11797 fence);
11798 }
11799
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011800 lock.unlock();
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011801
Dustin Graves8f1eab92016-04-05 09:41:17 -060011802 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +130011803 return dev_data->dispatch_table.QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011804
11805 return result;
11806}
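
// Usage sketch (illustrative only): an opaque image bind of the kind tracked
// by SetSparseMemBinding() above. 'queue', 'sparse_image', 'memory', and
// 'bind_size' are assumptions; the image must have been created with
// VK_IMAGE_CREATE_SPARSE_BINDING_BIT on a queue family that supports sparse binding.
#if 0
    VkSparseMemoryBind bind = {};
    bind.resourceOffset = 0;
    bind.size = bind_size;  // assumed to respect the image's sparse block granularity
    bind.memory = memory;
    bind.memoryOffset = 0;

    VkSparseImageOpaqueMemoryBindInfo opaque_bind = {};
    opaque_bind.image = sparse_image;
    opaque_bind.bindCount = 1;
    opaque_bind.pBinds = &bind;

    VkBindSparseInfo bind_info = {};
    bind_info.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
    bind_info.imageOpaqueBindCount = 1;
    bind_info.pImageOpaqueBinds = &opaque_bind;
    vkQueueBindSparse(queue, 1, &bind_info, VK_NULL_HANDLE);
#endif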
11807
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011808VKAPI_ATTR VkResult VKAPI_CALL CreateSemaphore(VkDevice device, const VkSemaphoreCreateInfo *pCreateInfo,
11809 const VkAllocationCallbacks *pAllocator, VkSemaphore *pSemaphore) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011810 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +130011811 VkResult result = dev_data->dispatch_table.CreateSemaphore(device, pCreateInfo, pAllocator, pSemaphore);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011812 if (result == VK_SUCCESS) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011813 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011814 SEMAPHORE_NODE* sNode = &dev_data->semaphoreMap[*pSemaphore];
Chris Forbes8320a8d2016-08-01 15:15:30 +120011815 sNode->signaler.first = VK_NULL_HANDLE;
11816 sNode->signaler.second = 0;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060011817 sNode->signaled = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011818 }
11819 return result;
11820}
11821
11822VKAPI_ATTR VkResult VKAPI_CALL
Chia-I Wu629d7cd2016-05-06 11:32:54 +080011823CreateEvent(VkDevice device, const VkEventCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, VkEvent *pEvent) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011824 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +130011825 VkResult result = dev_data->dispatch_table.CreateEvent(device, pCreateInfo, pAllocator, pEvent);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011826 if (result == VK_SUCCESS) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -060011827 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011828 dev_data->eventMap[*pEvent].needsSignaled = false;
Tony Barbour06465372016-06-06 10:55:04 -060011829 dev_data->eventMap[*pEvent].write_in_use = 0;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011830 dev_data->eventMap[*pEvent].stageMask = VkPipelineStageFlags(0);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070011831 }
11832 return result;
11833}
11834
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011835static bool PreCallValidateCreateSwapchainKHR(layer_data *dev_data, const char *func_name,
11836 VkSwapchainCreateInfoKHR const *pCreateInfo, SURFACE_STATE *surface_state,
11837 SWAPCHAIN_NODE *old_swapchain_state) {
Chris Forbesbc19b5c2016-10-06 13:01:33 +130011838 auto most_recent_swapchain = surface_state->swapchain ? surface_state->swapchain : surface_state->old_swapchain;
11839
Chris Forbes7be86f82016-11-25 16:17:28 +130011840 // TODO: Revisit this; some of these rules are being relaxed.
Chris Forbesbc19b5c2016-10-06 13:01:33 +130011841 if (most_recent_swapchain != old_swapchain_state || (surface_state->old_swapchain && surface_state->swapchain)) {
11842 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11843 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_ALREADY_EXISTS, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011844 "%s: surface has an existing swapchain other than oldSwapchain", func_name))
Chris Forbesbc19b5c2016-10-06 13:01:33 +130011845 return true;
11846 }
11847 if (old_swapchain_state && old_swapchain_state->createInfo.surface != pCreateInfo->surface) {
11848 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
11849 reinterpret_cast<uint64_t const &>(pCreateInfo->oldSwapchain), __LINE__, DRAWSTATE_SWAPCHAIN_WRONG_SURFACE,
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011850 "DS", "%s: pCreateInfo->oldSwapchain's surface is not pCreateInfo->surface", func_name))
Chris Forbesbc19b5c2016-10-06 13:01:33 +130011851 return true;
11852 }
Chris Forbesb52e0602016-10-11 16:21:32 +130011853 auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
11854 if (physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState == UNCALLED) {
11855 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
11856 reinterpret_cast<uint64_t>(dev_data->physical_device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011857 "%s: surface capabilities not retrieved for this physical device", func_name))
Chris Forbesb52e0602016-10-11 16:21:32 +130011858 return true;
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011859 } else { // have valid capabilities
11860 auto &capabilities = physical_device_state->surfaceCapabilities;
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011861 // Validate pCreateInfo->minImageCount against VkSurfaceCapabilitiesKHR::{min|max}ImageCount:
Mike Weiblend3fb3132016-12-06 10:28:00 -070011862 if (pCreateInfo->minImageCount < capabilities.minImageCount) {
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011863 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011864 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02331, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011865 "%s called with minImageCount = %d, which is outside the bounds returned "
Mike Weiblend3fb3132016-12-06 10:28:00 -070011866 "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011867 func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011868 validation_error_map[VALIDATION_ERROR_02331]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011869 return true;
11870 }
Mike Weiblend3fb3132016-12-06 10:28:00 -070011871
11872 if ((capabilities.maxImageCount > 0) && (pCreateInfo->minImageCount > capabilities.maxImageCount)) {
11873 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11874 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02332, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011875 "%s called with minImageCount = %d, which is outside the bounds returned "
Mike Weiblend3fb3132016-12-06 10:28:00 -070011876 "by vkGetPhysicalDeviceSurfaceCapabilitiesKHR() (i.e. minImageCount = %d, maxImageCount = %d). %s",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011877 func_name, pCreateInfo->minImageCount, capabilities.minImageCount, capabilities.maxImageCount,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011878 validation_error_map[VALIDATION_ERROR_02332]))
11879 return true;
11880 }
11881
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011882 // Validate pCreateInfo->imageExtent against VkSurfaceCapabilitiesKHR::{current|min|max}ImageExtent:
Jamie Madill6069c822016-12-15 09:35:36 -050011883 if ((capabilities.currentExtent.width == kSurfaceSizeFromSwapchain) &&
11884 ((pCreateInfo->imageExtent.width < capabilities.minImageExtent.width) ||
11885 (pCreateInfo->imageExtent.width > capabilities.maxImageExtent.width) ||
11886 (pCreateInfo->imageExtent.height < capabilities.minImageExtent.height) ||
11887 (pCreateInfo->imageExtent.height > capabilities.maxImageExtent.height))) {
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011888 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011889 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011890 "%s called with imageExtent = (%d,%d), which is outside the bounds returned by "
11891 "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(): currentExtent = (%d,%d), minImageExtent = (%d,%d), "
11892 "maxImageExtent = (%d,%d). %s",
11893 func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11894 capabilities.currentExtent.width, capabilities.currentExtent.height, capabilities.minImageExtent.width,
11895 capabilities.minImageExtent.height, capabilities.maxImageExtent.width, capabilities.maxImageExtent.height,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011896 validation_error_map[VALIDATION_ERROR_02334]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011897 return true;
11898 }
Jamie Madill6069c822016-12-15 09:35:36 -050011899 if ((capabilities.currentExtent.width != kSurfaceSizeFromSwapchain) &&
11900 ((pCreateInfo->imageExtent.width != capabilities.currentExtent.width) ||
11901 (pCreateInfo->imageExtent.height != capabilities.currentExtent.height))) {
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011902 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011903 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02334, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011904 "%s called with imageExtent = (%d,%d), which is not equal to the currentExtent = (%d,%d) returned by "
11905 "vkGetPhysicalDeviceSurfaceCapabilitiesKHR(). %s",
11906 func_name, pCreateInfo->imageExtent.width, pCreateInfo->imageExtent.height,
11907 capabilities.currentExtent.width, capabilities.currentExtent.height,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011908 validation_error_map[VALIDATION_ERROR_02334]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011909 return true;
11910 }
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011911 // pCreateInfo->preTransform should have exactly one bit set, and that bit must also be set in
11912 // VkSurfaceCapabilitiesKHR::supportedTransforms.
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011913 if (!pCreateInfo->preTransform || (pCreateInfo->preTransform & (pCreateInfo->preTransform - 1)) ||
11914 !(pCreateInfo->preTransform & capabilities.supportedTransforms)) {
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011915 // This is an error situation for which we'd like to give the developer a helpful, multi-line error message. Build
11916 // it up a little at a time, and then log it:
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011917 std::string errorString = "";
11918 char str[1024];
11919 // Here's the first part of the message:
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011920 sprintf(str, "%s called with a non-supported pCreateInfo->preTransform (i.e. %s). Supported values are:\n", func_name,
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011921 string_VkSurfaceTransformFlagBitsKHR(pCreateInfo->preTransform));
11922 errorString += str;
11923 for (int i = 0; i < 32; i++) {
11924 // Build up the rest of the message:
11925 if ((1 << i) & capabilities.supportedTransforms) {
11926 const char *newStr = string_VkSurfaceTransformFlagBitsKHR((VkSurfaceTransformFlagBitsKHR)(1 << i));
11927 sprintf(str, " %s\n", newStr);
11928 errorString += str;
11929 }
11930 }
11931 // Log the message that we've built up:
11932 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011933 reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02339, "DS", "%s. %s",
11934 errorString.c_str(), validation_error_map[VALIDATION_ERROR_02339]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011935 return true;
11936 }
Chris Forbesf13f5412016-10-31 17:20:22 +130011937
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011938 // pCreateInfo->compositeAlpha should have exactly one bit set, and that bit must also be set in
11939 // VkSurfaceCapabilitiesKHR::supportedCompositeAlpha
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011940 if (!pCreateInfo->compositeAlpha || (pCreateInfo->compositeAlpha & (pCreateInfo->compositeAlpha - 1)) ||
11941 !((pCreateInfo->compositeAlpha) & capabilities.supportedCompositeAlpha)) {
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011942 // This is an error situation for which we'd like to give the developer a helpful, multi-line error message. Build
11943 // it up a little at a time, and then log it:
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011944 std::string errorString = "";
11945 char str[1024];
11946 // Here's the first part of the message:
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011947 sprintf(str, "%s called with a non-supported pCreateInfo->compositeAlpha (i.e. %s). Supported values are:\n",
11948 func_name, string_VkCompositeAlphaFlagBitsKHR(pCreateInfo->compositeAlpha));
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011949 errorString += str;
11950 for (int i = 0; i < 32; i++) {
11951 // Build up the rest of the message:
11952 if ((1 << i) & capabilities.supportedCompositeAlpha) {
11953 const char *newStr = string_VkCompositeAlphaFlagBitsKHR((VkCompositeAlphaFlagBitsKHR)(1 << i));
11954 sprintf(str, " %s\n", newStr);
11955 errorString += str;
11956 }
11957 }
11958 // Log the message that we've built up:
11959 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011960 reinterpret_cast<uint64_t &>(dev_data->device), __LINE__, VALIDATION_ERROR_02340, "DS", "%s. %s",
11961 errorString.c_str(), validation_error_map[VALIDATION_ERROR_02340]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011962 return true;
11963 }
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011964 // Validate pCreateInfo->imageArrayLayers against VkSurfaceCapabilitiesKHR::maxImageArrayLayers:
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011965 if ((pCreateInfo->imageArrayLayers < 1) || (pCreateInfo->imageArrayLayers > capabilities.maxImageArrayLayers)) {
11966 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011967 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02335, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011968 "%s called with a non-supported imageArrayLayers (i.e. %d). Minimum value is 1, maximum value is %d. %s",
11969 func_name, pCreateInfo->imageArrayLayers, capabilities.maxImageArrayLayers,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011970 validation_error_map[VALIDATION_ERROR_02335]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011971 return true;
11972 }
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011973 // Validate pCreateInfo->imageUsage against VkSurfaceCapabilitiesKHR::supportedUsageFlags:
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011974 if (pCreateInfo->imageUsage != (pCreateInfo->imageUsage & capabilities.supportedUsageFlags)) {
11975 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070011976 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02336, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011977 "%s called with a non-supported pCreateInfo->imageUsage (i.e. 0x%08x). Supported flag bits are 0x%08x. %s",
11978 func_name, pCreateInfo->imageUsage, capabilities.supportedUsageFlags,
11979 validation_error_map[VALIDATION_ERROR_02336]))
Chris Forbesfc77bbe2016-10-12 12:24:44 +130011980 return true;
11981 }
Chris Forbesb52e0602016-10-11 16:21:32 +130011982 }
Chris Forbesbc19b5c2016-10-06 13:01:33 +130011983
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011984 // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfaceFormatsKHR():
Chris Forbes11ab1712016-11-25 16:37:41 +130011985 if (physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState != QUERY_DETAILS) {
11986 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
11987 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011988 "%s called before calling vkGetPhysicalDeviceSurfaceFormatsKHR().", func_name))
Chris Forbes11ab1712016-11-25 16:37:41 +130011989 return true;
11990 } else {
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011991 // Validate pCreateInfo->imageFormat against VkSurfaceFormatKHR::format:
Chris Forbes11ab1712016-11-25 16:37:41 +130011992 bool foundFormat = false;
11993 bool foundColorSpace = false;
11994 bool foundMatch = false;
11995 for (auto const &format : physical_device_state->surface_formats) {
11996 if (pCreateInfo->imageFormat == format.format) {
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070011997 // Validate pCreateInfo->imageColorSpace against VkSurfaceFormatKHR::colorSpace:
Chris Forbes11ab1712016-11-25 16:37:41 +130011998 foundFormat = true;
11999 if (pCreateInfo->imageColorSpace == format.colorSpace) {
12000 foundMatch = true;
12001 break;
12002 }
12003 } else {
12004 if (pCreateInfo->imageColorSpace == format.colorSpace) {
12005 foundColorSpace = true;
12006 }
12007 }
12008 }
12009 if (!foundMatch) {
12010 if (!foundFormat) {
Chris Forbes11ab1712016-11-25 16:37:41 +130012011 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012012 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012013 "%s called with a non-supported pCreateInfo->imageFormat (i.e. %d). %s",
12014 func_name, pCreateInfo->imageFormat, validation_error_map[VALIDATION_ERROR_02333]))
Mike Weiblend3fb3132016-12-06 10:28:00 -070012015 return true;
12016 }
12017 if (!foundColorSpace) {
12018 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
12019 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02333, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012020 "%s called with a non-supported pCreateInfo->imageColorSpace (i.e. %d). %s",
12021 func_name, pCreateInfo->imageColorSpace, validation_error_map[VALIDATION_ERROR_02333]))
Chris Forbes11ab1712016-11-25 16:37:41 +130012022 return true;
12023 }
12024 }
12025 }
12026
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012027 // Validate pCreateInfo values with the results of vkGetPhysicalDeviceSurfacePresentModesKHR():
Chris Forbesad22fc32016-11-25 13:17:36 +130012028 if (physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState != QUERY_DETAILS) {
Mark Lobodzinskiba3fe522016-12-27 16:42:06 -070012029 // FIFO is required to always be supported
Chris Forbesad22fc32016-11-25 13:17:36 +130012030 if (pCreateInfo->presentMode != VK_PRESENT_MODE_FIFO_KHR) {
12031 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012032 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, DRAWSTATE_SWAPCHAIN_CREATE_BEFORE_QUERY, "DS",
12033 "%s called before calling vkGetPhysicalDeviceSurfacePresentModesKHR().", func_name))
Chris Forbesad22fc32016-11-25 13:17:36 +130012034 return true;
12035 }
12036 } else {
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012037 // Validate pCreateInfo->presentMode against vkGetPhysicalDeviceSurfacePresentModesKHR():
Chris Forbesad22fc32016-11-25 13:17:36 +130012038 bool foundMatch = std::find(physical_device_state->present_modes.begin(),
12039 physical_device_state->present_modes.end(),
12040 pCreateInfo->presentMode) != physical_device_state->present_modes.end();
12041 if (!foundMatch) {
12042 if (log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012043 reinterpret_cast<uint64_t>(dev_data->device), __LINE__, VALIDATION_ERROR_02341, "DS",
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012044 "%s called with a non-supported presentMode (i.e. %s). %s", func_name,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012045 string_VkPresentModeKHR(pCreateInfo->presentMode), validation_error_map[VALIDATION_ERROR_02341]))
Chris Forbesad22fc32016-11-25 13:17:36 +130012046 return true;
12047 }
12048 }
12049
Chris Forbesbc19b5c2016-10-06 13:01:33 +130012050 return false;
12051}
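
// Usage sketch (illustrative only): how an application typically derives
// pCreateInfo values that satisfy the checks above, by clamping against
// VkSurfaceCapabilitiesKHR. 'physical_device', 'surface', 'window_width', and
// 'window_height' (uint32_t) are assumptions; 0xFFFFFFFF in currentExtent is
// the "size determined by the swapchain" sentinel that kSurfaceSizeFromSwapchain represents.
#if 0
    VkSurfaceCapabilitiesKHR caps;
    vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &caps);

    uint32_t image_count = caps.minImageCount + 1;
    if (caps.maxImageCount > 0 && image_count > caps.maxImageCount) {
        image_count = caps.maxImageCount;  // maxImageCount of 0 means "no upper limit"
    }

    VkExtent2D extent = caps.currentExtent;
    if (extent.width == 0xFFFFFFFF) {  // surface size is driven by the swapchain
        extent.width = std::max(caps.minImageExtent.width, std::min(caps.maxImageExtent.width, window_width));
        extent.height = std::max(caps.minImageExtent.height, std::min(caps.maxImageExtent.height, window_height));
    }
#endif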
12052
Mark Lobodzinskid1c34362017-01-11 12:09:31 -070012053static void PostCallRecordCreateSwapchainKHR(layer_data *dev_data, VkResult result, const VkSwapchainCreateInfoKHR *pCreateInfo,
12054 VkSwapchainKHR *pSwapchain, SURFACE_STATE *surface_state,
12055 SWAPCHAIN_NODE *old_swapchain_state) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012056 if (VK_SUCCESS == result) {
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012057 std::lock_guard<std::mutex> lock(global_lock);
Chris Forbesc3b08152016-10-06 13:01:17 +130012058 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(pCreateInfo, *pSwapchain));
12059 surface_state->swapchain = swapchain_state.get();
12060 dev_data->device_extensions.swapchainMap[*pSwapchain] = std::move(swapchain_state);
12061 } else {
12062 surface_state->swapchain = nullptr;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012063 }
Chris Forbesc3b08152016-10-06 13:01:17 +130012064 // Spec requires that even if CreateSwapchainKHR fails, oldSwapchain behaves as replaced.
Chris Forbes8a047d02016-12-09 10:39:26 +130012065 if (old_swapchain_state) {
12066 old_swapchain_state->replaced = true;
12067 }
Chris Forbesc3b08152016-10-06 13:01:17 +130012068 surface_state->old_swapchain = old_swapchain_state;
Mark Lobodzinskid1c34362017-01-11 12:09:31 -070012069 return;
12070}
12071
12072VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
12073 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
12074 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12075 auto surface_state = getSurfaceState(dev_data->instance_data, pCreateInfo->surface);
12076 auto old_swapchain_state = getSwapchainNode(dev_data, pCreateInfo->oldSwapchain);
12077
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012078 if (PreCallValidateCreateSwapchainKHR(dev_data, "vkCreateSwapchainKHR()", pCreateInfo, surface_state, old_swapchain_state)) {
Mark Lobodzinskid1c34362017-01-11 12:09:31 -070012079 return VK_ERROR_VALIDATION_FAILED_EXT;
12080 }
12081
12082 VkResult result = dev_data->dispatch_table.CreateSwapchainKHR(device, pCreateInfo, pAllocator, pSwapchain);
12083
12084 PostCallRecordCreateSwapchainKHR(dev_data, result, pCreateInfo, pSwapchain, surface_state, old_swapchain_state);
Chris Forbesc3b08152016-10-06 13:01:17 +130012085
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012086 return result;
12087}
12088
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012089VKAPI_ATTR void VKAPI_CALL
12090DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012091 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -060012092 bool skip_call = false;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012093
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012094 std::unique_lock<std::mutex> lock(global_lock);
Tobin Ehlis4e380592016-06-02 12:41:47 -060012095 auto swapchain_data = getSwapchainNode(dev_data, swapchain);
12096 if (swapchain_data) {
12097 if (swapchain_data->images.size() > 0) {
12098 for (auto swapchain_image : swapchain_data->images) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012099 auto image_sub = dev_data->imageSubresourceMap.find(swapchain_image);
12100 if (image_sub != dev_data->imageSubresourceMap.end()) {
12101 for (auto imgsubpair : image_sub->second) {
12102 auto image_item = dev_data->imageLayoutMap.find(imgsubpair);
12103 if (image_item != dev_data->imageLayoutMap.end()) {
12104 dev_data->imageLayoutMap.erase(image_item);
12105 }
12106 }
12107 dev_data->imageSubresourceMap.erase(image_sub);
12108 }
Tobin Ehlisfe871282016-06-28 10:28:02 -060012109 skip_call =
Tobin Ehlise89829a2016-10-11 17:29:32 -060012110 ClearMemoryObjectBindings(dev_data, (uint64_t)swapchain_image, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT);
Tobin Ehlisf263ba42016-04-05 13:33:00 -060012111 dev_data->imageMap.erase(swapchain_image);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012112 }
12113 }
Chris Forbesc3b08152016-10-06 13:01:17 +130012114
12115 auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12116 if (surface_state) {
12117 if (surface_state->swapchain == swapchain_data)
12118 surface_state->swapchain = nullptr;
12119 if (surface_state->old_swapchain == swapchain_data)
12120 surface_state->old_swapchain = nullptr;
12121 }
12122
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012123 dev_data->device_extensions.swapchainMap.erase(swapchain);
12124 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012125 lock.unlock();
Tobin Ehlisfe871282016-06-28 10:28:02 -060012126 if (!skip_call)
Chris Forbesaaa9c282016-10-03 20:01:14 +130012127 dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012128}
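
// Usage sketch (illustrative only): teardown order on the application side.
// Swapchain images are owned by the swapchain and must not be passed to
// vkDestroyImage(); only views the app created from them are destroyed here.
// The handles and the 'swapchain_image_views' container are assumptions.
#if 0
    vkDeviceWaitIdle(device);  // ensure no queue still references the swapchain images
    for (VkImageView view : swapchain_image_views) {
        vkDestroyImageView(device, view, nullptr);
    }
    vkDestroySwapchainKHR(device, swapchain, nullptr);
#endif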
12129
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012130VKAPI_ATTR VkResult VKAPI_CALL
12131GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012132 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Chris Forbesaaa9c282016-10-03 20:01:14 +130012133 VkResult result = dev_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012134
12135 if (result == VK_SUCCESS && pSwapchainImages != NULL) {
12136 // This should never happen and is checked by param checker.
12137 if (!pCount)
12138 return result;
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012139 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012140 const size_t count = *pCount;
Tobin Ehlis4e380592016-06-02 12:41:47 -060012141 auto swapchain_node = getSwapchainNode(dev_data, swapchain);
12142 if (swapchain_node && !swapchain_node->images.empty()) {
Tobin Ehlis43d7c522016-03-16 13:52:20 -060012143 // TODO : Not sure I like the memcmp here, but it works
12144 const bool mismatch = (swapchain_node->images.size() != count ||
12145 memcmp(&swapchain_node->images[0], pSwapchainImages, sizeof(swapchain_node->images[0]) * count));
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012146 if (mismatch) {
12147 // TODO: Verify against Valid Usage section of extension
12148 log_msg(dev_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12149 (uint64_t)swapchain, __LINE__, MEMTRACK_NONE, "SWAP_CHAIN",
Mark Muelleraab36502016-05-03 13:17:29 -060012150 "vkGetSwapchainImagesKHR(0x%" PRIx64
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012151 ") returned mismatching data across calls",
12152 (uint64_t)(swapchain));
12153 }
12154 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012155 for (uint32_t i = 0; i < *pCount; ++i) {
12156 IMAGE_LAYOUT_NODE image_layout_node;
12157 image_layout_node.layout = VK_IMAGE_LAYOUT_UNDEFINED;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012158 image_layout_node.format = swapchain_node->createInfo.imageFormat;
Tobin Ehlis1c9c55f2016-06-02 11:49:22 -060012159 // Add imageMap entries for each swapchain image
12160 VkImageCreateInfo image_ci = {};
12161 image_ci.mipLevels = 1;
12162 image_ci.arrayLayers = swapchain_node->createInfo.imageArrayLayers;
12163 image_ci.usage = swapchain_node->createInfo.imageUsage;
12164 image_ci.format = swapchain_node->createInfo.imageFormat;
Tobin Ehlisdae051d2016-06-22 14:16:06 -060012165 image_ci.samples = VK_SAMPLE_COUNT_1_BIT;
Tobin Ehlis1c9c55f2016-06-02 11:49:22 -060012166 image_ci.extent.width = swapchain_node->createInfo.imageExtent.width;
12167 image_ci.extent.height = swapchain_node->createInfo.imageExtent.height;
12168 image_ci.sharingMode = swapchain_node->createInfo.imageSharingMode;
Tobin Ehlis30df15c2016-10-12 17:17:57 -060012169 dev_data->imageMap[pSwapchainImages[i]] = unique_ptr<IMAGE_STATE>(new IMAGE_STATE(pSwapchainImages[i], &image_ci));
12170 auto &image_state = dev_data->imageMap[pSwapchainImages[i]];
12171 image_state->valid = false;
Tobin Ehlis54108272016-10-11 14:26:49 -060012172 image_state->binding.mem = MEMTRACKER_SWAP_CHAIN_IMAGE_KEY;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012173 swapchain_node->images.push_back(pSwapchainImages[i]);
12174 ImageSubresourcePair subpair = {pSwapchainImages[i], false, VkImageSubresource()};
12175 dev_data->imageSubresourceMap[pSwapchainImages[i]].push_back(subpair);
12176 dev_data->imageLayoutMap[subpair] = image_layout_node;
12177 dev_data->device_extensions.imageToSwapchainMap[pSwapchainImages[i]] = swapchain;
12178 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012179 }
12180 return result;
12181}
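
// Usage sketch (illustrative only): the standard two-call idiom that feeds this
// entry point; the tracking above records one IMAGE_STATE per returned handle.
// 'device' and 'swapchain' are assumed valid.
#if 0
    uint32_t image_count = 0;
    vkGetSwapchainImagesKHR(device, swapchain, &image_count, nullptr);
    std::vector<VkImage> images(image_count);
    vkGetSwapchainImagesKHR(device, swapchain, &image_count, images.data());
#endif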
12182
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012183VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012184 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012185 bool skip_call = false;
12186
Chris Forbes95b4fa32016-06-16 17:14:34 +120012187 std::lock_guard<std::mutex> lock(global_lock);
Tobin Ehlis3cd97ff2016-12-15 11:37:02 -070012188 auto queue_state = getQueueState(dev_data, queue);
Chris Forbesb998bf92016-10-12 10:21:19 +130012189
Chris Forbes95b4fa32016-06-16 17:14:34 +120012190 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12191 auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
12192 if (pSemaphore && !pSemaphore->signaled) {
Tobin Ehlis50b6c172016-12-22 10:42:36 -070012193 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
12194 VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, 0, __LINE__, DRAWSTATE_QUEUE_FORWARD_PROGRESS,
12195 "DS", "Queue 0x%p is waiting on semaphore 0x%" PRIx64 " that has no way to be signaled.", queue,
12196 reinterpret_cast<const uint64_t &>(pPresentInfo->pWaitSemaphores[i]));
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012197 }
Chris Forbes95b4fa32016-06-16 17:14:34 +120012198 }
Tobin Ehlis5611e922016-06-28 15:52:55 -060012199
Chris Forbes95b4fa32016-06-16 17:14:34 +120012200 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12201 auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
Chris Forbesf6f063d2016-09-22 18:34:20 +120012202 if (swapchain_data) {
12203 if (pPresentInfo->pImageIndices[i] >= swapchain_data->images.size()) {
12204 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
Chris Forbesf54f4c72016-09-26 15:18:57 +130012205 reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_INVALID_IMAGE,
Chris Forbesf6f063d2016-09-22 18:34:20 +120012206 "DS", "vkQueuePresentKHR: Swapchain image index too large (%u). There are only %u images in this swapchain.",
12207 pPresentInfo->pImageIndices[i], (uint32_t)swapchain_data->images.size());
12208 }
12209 else {
12210 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
Tobin Ehlis30df15c2016-10-12 17:17:57 -060012211 auto image_state = getImageState(dev_data, image);
12212 skip_call |= ValidateImageMemoryIsValid(dev_data, image_state, "vkQueuePresentKHR()");
Chris Forbesf6f063d2016-09-22 18:34:20 +120012213
Tobin Ehlis30df15c2016-10-12 17:17:57 -060012214 if (!image_state->acquired) {
Chris Forbesf6f063d2016-09-22 18:34:20 +120012215 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12216 reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, DRAWSTATE_SWAPCHAIN_IMAGE_NOT_ACQUIRED,
12217 "DS", "vkQueuePresentKHR: Swapchain image index %u has not been acquired.",
12218 pPresentInfo->pImageIndices[i]);
12219 }
12220
12221 vector<VkImageLayout> layouts;
12222 if (FindLayouts(dev_data, image, layouts)) {
12223 for (auto layout : layouts) {
12224 if (layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
12225 skip_call |=
Mike Weiblend3fb3132016-12-06 10:28:00 -070012226 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
12227 reinterpret_cast<uint64_t &>(queue), __LINE__, VALIDATION_ERROR_01964, "DS",
12228 "Images passed to present must be in layout "
12229 "VK_IMAGE_LAYOUT_PRESENT_SRC_KHR but is in %s. %s",
12230 string_VkImageLayout(layout), validation_error_map[VALIDATION_ERROR_01964]);
Chris Forbesf6f063d2016-09-22 18:34:20 +120012231 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012232 }
12233 }
12234 }
Chris Forbesb998bf92016-10-12 10:21:19 +130012235
12236 // All physical devices and queue families are required to be able
12237 // to present to any native window on Android; require the
12238 // application to have established support on any other platform.
12239 if (!dev_data->instance_data->androidSurfaceExtensionEnabled) {
12240 auto surface_state = getSurfaceState(dev_data->instance_data, swapchain_data->createInfo.surface);
12241 auto support_it = surface_state->gpu_queue_support.find({dev_data->physical_device, queue_state->queueFamilyIndex});
12242
12243 if (support_it == surface_state->gpu_queue_support.end()) {
12244 skip_call |=
12245 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12246 reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__,
12247 DRAWSTATE_SWAPCHAIN_UNSUPPORTED_QUEUE, "DS", "vkQueuePresentKHR: Presenting image without calling "
12248 "vkGetPhysicalDeviceSurfaceSupportKHR");
12249 } else if (!support_it->second) {
Mike Weiblend3fb3132016-12-06 10:28:00 -070012250 skip_call |=
12251 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12252 reinterpret_cast<uint64_t const &>(pPresentInfo->pSwapchains[i]), __LINE__, VALIDATION_ERROR_01961,
12253 "DS", "vkQueuePresentKHR: Presenting image on queue that cannot "
12254 "present to this surface. %s",
12255 validation_error_map[VALIDATION_ERROR_01961]);
Chris Forbesb998bf92016-10-12 10:21:19 +130012256 }
12257 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012258 }
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012259 }
12260
Chris Forbes95b4fa32016-06-16 17:14:34 +120012261 if (skip_call) {
12262 return VK_ERROR_VALIDATION_FAILED_EXT;
12263 }
12264
Chris Forbesaaa9c282016-10-03 20:01:14 +130012265 VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, pPresentInfo);
Chris Forbes95b4fa32016-06-16 17:14:34 +120012266
12267 if (result != VK_ERROR_VALIDATION_FAILED_EXT) {
12268 // Semaphore waits occur before error generation, if the call reached
12269 // the ICD. (Confirm?)
12270 for (uint32_t i = 0; i < pPresentInfo->waitSemaphoreCount; ++i) {
12271 auto pSemaphore = getSemaphoreNode(dev_data, pPresentInfo->pWaitSemaphores[i]);
Chris Forbes8320a8d2016-08-01 15:15:30 +120012272 if (pSemaphore) {
12273 pSemaphore->signaler.first = VK_NULL_HANDLE;
Chris Forbes95b4fa32016-06-16 17:14:34 +120012274 pSemaphore->signaled = false;
12275 }
12276 }
Chris Forbes8320a8d2016-08-01 15:15:30 +120012277
Chris Forbes048399d2016-09-22 17:11:06 +120012278 for (uint32_t i = 0; i < pPresentInfo->swapchainCount; ++i) {
12279 // Note: this is imperfect, in that we can get confused about what
12280 // did or didn't succeed-- but if the app does that, it's confused
12281 // itself just as much.
12282 auto local_result = pPresentInfo->pResults ? pPresentInfo->pResults[i] : result;
12283
12284 if (local_result != VK_SUCCESS && local_result != VK_SUBOPTIMAL_KHR)
12285 continue; // this present didn't actually happen.
12286
12287 // Mark the image as having been released to the WSI
12288 auto swapchain_data = getSwapchainNode(dev_data, pPresentInfo->pSwapchains[i]);
12289 auto image = swapchain_data->images[pPresentInfo->pImageIndices[i]];
Tobin Ehlis30df15c2016-10-12 17:17:57 -060012290 auto image_state = getImageState(dev_data, image);
12291 image_state->acquired = false;
Chris Forbes048399d2016-09-22 17:11:06 +120012292 }
12293
Chris Forbes8320a8d2016-08-01 15:15:30 +120012294 // Note: even though presentation is directed to a queue, there is no
12295 // direct ordering between QP and subsequent work, so QP (and its
12296 // semaphore waits) /never/ participate in any completion proof.
Chris Forbes95b4fa32016-06-16 17:14:34 +120012297 }
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012298
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012299 return result;
12300}
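
// Usage sketch (illustrative only): a present that satisfies the checks above:
// the image was acquired, the submitted work transitioned it to
// VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, and the present waits on the semaphore that
// submission signals. All handles here are assumptions.
#if 0
    VkPresentInfoKHR present_info = {};
    present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
    present_info.waitSemaphoreCount = 1;
    present_info.pWaitSemaphores = &render_finished_semaphore;
    present_info.swapchainCount = 1;
    present_info.pSwapchains = &swapchain;
    present_info.pImageIndices = &image_index;
    vkQueuePresentKHR(present_queue, &present_info);
#endif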
12301
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012302static bool PreCallValidateCreateSharedSwapchainsKHR(layer_data *dev_data, uint32_t swapchainCount,
12303 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12304 std::vector<SURFACE_STATE *> &surface_state,
12305 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012306 if (pCreateInfos) {
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012307 std::lock_guard<std::mutex> lock(global_lock);
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012308 for (uint32_t i = 0; i < swapchainCount; i++) {
12309 surface_state.push_back(getSurfaceState(dev_data->instance_data, pCreateInfos[i].surface));
12310 old_swapchain_state.push_back(getSwapchainNode(dev_data, pCreateInfos[i].oldSwapchain));
Mark Lobodzinskiec3cf782017-01-11 13:54:09 -070012311 std::stringstream func_name;
12312 func_name << "vkCreateSharedSwapchainsKHR[" << i << "]";
12313 if (PreCallValidateCreateSwapchainKHR(dev_data, func_name.str().c_str(), &pCreateInfos[i], surface_state[i], old_swapchain_state[i])) {
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012314 return true;
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012315 }
12316 }
12317 }
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012318 return false;
12319}
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012320
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012321static void PostCallRecordCreateSharedSwapchainsKHR(layer_data *dev_data, VkResult result, uint32_t swapchainCount,
12322 const VkSwapchainCreateInfoKHR *pCreateInfos, VkSwapchainKHR *pSwapchains,
12323 std::vector<SURFACE_STATE *> &surface_state,
12324 std::vector<SWAPCHAIN_NODE *> &old_swapchain_state) {
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012325 if (VK_SUCCESS == result) {
12326 for (uint32_t i = 0; i < swapchainCount; i++) {
12327 auto swapchain_state = unique_ptr<SWAPCHAIN_NODE>(new SWAPCHAIN_NODE(&pCreateInfos[i], pSwapchains[i]));
12328 surface_state[i]->swapchain = swapchain_state.get();
12329 dev_data->device_extensions.swapchainMap[pSwapchains[i]] = std::move(swapchain_state);
12330 }
12331 } else {
12332 for (uint32_t i = 0; i < swapchainCount; i++) {
12333 surface_state[i]->swapchain = nullptr;
12334 }
12335 }
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012336 // Spec requires that even if CreateSharedSwapchainKHR fails, oldSwapchain behaves as replaced.
12337 for (uint32_t i = 0; i < swapchainCount; i++) {
12338 if (old_swapchain_state[i]) {
12339 old_swapchain_state[i]->replaced = true;
12340 }
12341 surface_state[i]->old_swapchain = old_swapchain_state[i];
12342 }
Mark Lobodzinski8ba5e252017-01-11 13:14:36 -070012343 return;
12344}
12345
12346VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
12347 const VkSwapchainCreateInfoKHR *pCreateInfos,
12348 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
12349 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
12350 std::vector<SURFACE_STATE *> surface_state;
12351 std::vector<SWAPCHAIN_NODE *> old_swapchain_state;
12352
12353 if (PreCallValidateCreateSharedSwapchainsKHR(dev_data, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12354 old_swapchain_state)) {
12355 return VK_ERROR_VALIDATION_FAILED_EXT;
12356 }
12357
12358 VkResult result =
12359 dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, pCreateInfos, pAllocator, pSwapchains);
12360
12361 PostCallRecordCreateSharedSwapchainsKHR(dev_data, result, swapchainCount, pCreateInfos, pSwapchains, surface_state,
12362 old_swapchain_state);
Mark Lobodzinskif4d069f2017-01-10 18:10:03 -070012363
Mark Young1a867442016-07-01 15:18:27 -060012364 return result;
12365}
12366
Chia-I Wu629d7cd2016-05-06 11:32:54 +080012367VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImageKHR(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout,
12368 VkSemaphore semaphore, VkFence fence, uint32_t *pImageIndex) {
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012369 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
Tobin Ehlisfe871282016-06-28 10:28:02 -060012370 bool skip_call = false;
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012371
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012372 std::unique_lock<std::mutex> lock(global_lock);
Chris Forbes76fa6c62016-09-22 16:40:27 +120012373
12374 if (fence == VK_NULL_HANDLE && semaphore == VK_NULL_HANDLE) {
12375 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT,
Chris Forbesf54f4c72016-09-26 15:18:57 +130012376 reinterpret_cast<uint64_t &>(device), __LINE__, DRAWSTATE_SWAPCHAIN_NO_SYNC_FOR_ACQUIRE, "DS",
Chris Forbes76fa6c62016-09-22 16:40:27 +120012377 "vkAcquireNextImageKHR: Semaphore and fence cannot both be VK_NULL_HANDLE. There would be no way "
12378 "to determine the completion of this operation.");
12379 }
12380
Chris Forbes8784e952016-06-16 12:20:32 +120012381 auto pSemaphore = getSemaphoreNode(dev_data, semaphore);
12382 if (pSemaphore && pSemaphore->signaled) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060012383 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT,
Mike Weiblend3fb3132016-12-06 10:28:00 -070012384 reinterpret_cast<const uint64_t &>(semaphore), __LINE__, VALIDATION_ERROR_01952, "DS",
12385 "vkAcquireNextImageKHR: Semaphore must not be currently signaled or in a wait state. %s",
12386 validation_error_map[VALIDATION_ERROR_01952]);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012387 }
Chris Forbes8784e952016-06-16 12:20:32 +120012388
12389 auto pFence = getFenceNode(dev_data, fence);
12390 if (pFence) {
Tobin Ehlisfe871282016-06-28 10:28:02 -060012391 skip_call |= ValidateFenceForSubmit(dev_data, pFence);
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012392 }
Chris Forbes73de0852016-10-12 13:27:13 +130012393
12394 auto swapchain_data = getSwapchainNode(dev_data, swapchain);
Chris Forbes9a936d72016-12-09 11:00:21 +130012395
12396 if (swapchain_data && swapchain_data->replaced) {
12397 skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12398 reinterpret_cast<uint64_t &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_REPLACED, "DS",
12399 "vkAcquireNextImageKHR: This swapchain has been replaced. The application can still "
12400 "present any images it has acquired, but cannot acquire any more.");
12401 }
12402
Chris Forbes73de0852016-10-12 13:27:13 +130012403 auto physical_device_state = getPhysicalDeviceState(dev_data->instance_data, dev_data->physical_device);
12404 if (swapchain_data && physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState != UNCALLED) {
Mark Lobodzinski9e023442016-11-23 11:28:30 -070012405 uint64_t acquired_images = std::count_if(swapchain_data->images.begin(), swapchain_data->images.end(),
12406 [=](VkImage image) { return getImageState(dev_data, image)->acquired; });
Chris Forbes73de0852016-10-12 13:27:13 +130012407 if (acquired_images > swapchain_data->images.size() - physical_device_state->surfaceCapabilities.minImageCount) {
Mark Lobodzinski9e023442016-11-23 11:28:30 -070012408 skip_call |=
12409 log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT,
12410 reinterpret_cast<uint64_t const &>(swapchain), __LINE__, DRAWSTATE_SWAPCHAIN_TOO_MANY_IMAGES, "DS",
12411 "vkAcquireNextImageKHR: Application has already acquired the maximum number of images (0x%" PRIxLEAST64 ")",
12412 acquired_images);
Chris Forbes73de0852016-10-12 13:27:13 +130012413 }
12414 }
Jeremy Hayesda8797f2016-04-13 16:20:24 -060012415 lock.unlock();
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012416
Tobin Ehlisfe871282016-06-28 10:28:02 -060012417 if (skip_call)
Chris Forbes8784e952016-06-16 12:20:32 +120012418 return VK_ERROR_VALIDATION_FAILED_EXT;
12419
Chris Forbesaaa9c282016-10-03 20:01:14 +130012420 VkResult result = dev_data->dispatch_table.AcquireNextImageKHR(device, swapchain, timeout, semaphore, fence, pImageIndex);
Chris Forbes8784e952016-06-16 12:20:32 +120012421
12422 lock.lock();
12423 if (result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR) {
12424 if (pFence) {
12425 pFence->state = FENCE_INFLIGHT;
Chris Forbes8320a8d2016-08-01 15:15:30 +120012426 pFence->signaler.first = VK_NULL_HANDLE; // ANI isn't on a queue, so this can't participate in a completion proof.
Chris Forbes8784e952016-06-16 12:20:32 +120012427 }
12428
12429 // A successful call to AcquireNextImageKHR counts as a signal operation on semaphore
12430 if (pSemaphore) {
12431 pSemaphore->signaled = true;
Chris Forbes8320a8d2016-08-01 15:15:30 +120012432 pSemaphore->signaler.first = VK_NULL_HANDLE;
Chris Forbes8784e952016-06-16 12:20:32 +120012433 }
Chris Forbes048399d2016-09-22 17:11:06 +120012434
12435 // Mark the image as acquired.
Chris Forbes048399d2016-09-22 17:11:06 +120012436 auto image = swapchain_data->images[*pImageIndex];
Tobin Ehlis30df15c2016-10-12 17:17:57 -060012437 auto image_state = getImageState(dev_data, image);
12438 image_state->acquired = true;
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012439 }
Chris Forbes8784e952016-06-16 12:20:32 +120012440 lock.unlock();
Tobin Ehlis9984f1e2016-04-12 10:49:41 -060012441
Tobin Ehlisc96f8062016-03-09 16:12:48 -070012442 return result;
12443}
12444
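// Application-side usage sketch (illustrative comment only, not compiled into the layer).
// It shows the call pattern the checks above expect: pass at least one of a semaphore or
// a fence, and make sure the semaphore is unsignaled when it is handed in. The handles
// `device`, `swapchain`, and `acquire_semaphore` are hypothetical and assumed to have
// been created elsewhere.
//
//     uint32_t image_index = 0;
//     VkResult err = vkAcquireNextImageKHR(device, swapchain, UINT64_MAX,
//                                          acquire_semaphore, // signaled once the image is usable
//                                          VK_NULL_HANDLE,    // fence is optional when a semaphore is given
//                                          &image_index);
//     // On VK_SUCCESS or VK_SUBOPTIMAL_KHR, image_index names the acquired swapchain image.
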
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    assert(instance_data);

    // For this instance, flag when vkEnumeratePhysicalDevices goes to QUERY_COUNT and then QUERY_DETAILS
    if (NULL == pPhysicalDevices) {
        instance_data->vkEnumeratePhysicalDevicesState = QUERY_COUNT;
    } else {
        if (UNCALLED == instance_data->vkEnumeratePhysicalDevicesState) {
            // Flag a warning here. This can be called without having queried the count first, but doing so
            // may not be robust on platforms with multiple physical devices.
            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                 "Call sequence has vkEnumeratePhysicalDevices() w/ non-NULL pPhysicalDevices. You should first "
                                 "call vkEnumeratePhysicalDevices() w/ NULL pPhysicalDevices to query pPhysicalDeviceCount.");
        } // TODO: Could also flag a warning if re-calling this function in QUERY_DETAILS state
        else if (instance_data->physical_devices_count != *pPhysicalDeviceCount) {
            // Having the actual count match the count from the app is not a requirement, so this can be a warning
            skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                 VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                 "Call to vkEnumeratePhysicalDevices() w/ pPhysicalDeviceCount value %u, but actual count "
                                 "supported by this instance is %u.",
                                 *pPhysicalDeviceCount, instance_data->physical_devices_count);
        }
        instance_data->vkEnumeratePhysicalDevicesState = QUERY_DETAILS;
    }
    if (skip_call) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = instance_data->dispatch_table.EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    if (NULL == pPhysicalDevices) {
        instance_data->physical_devices_count = *pPhysicalDeviceCount;
    } else if (result == VK_SUCCESS) { // Save physical devices
        for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
            auto &phys_device_state = instance_data->physical_device_map[pPhysicalDevices[i]];
            phys_device_state.phys_device = pPhysicalDevices[i];
            // Init actual features for each physical device
            instance_data->dispatch_table.GetPhysicalDeviceFeatures(pPhysicalDevices[i], &phys_device_state.features);
        }
    }
    return result;
}

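// Usage sketch of the two-call idiom this function tracks (illustrative comment only;
// `instance` is a hypothetical valid VkInstance):
//
//     uint32_t gpu_count = 0;
//     vkEnumeratePhysicalDevices(instance, &gpu_count, nullptr);     // first call: query the count
//     std::vector<VkPhysicalDevice> gpus(gpu_count);
//     vkEnumeratePhysicalDevices(instance, &gpu_count, gpus.data()); // second call: retrieve the handles
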
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    if (physical_device_state) {
        if (!pQueueFamilyProperties) {
            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_COUNT;
        } else {
            // Verify that, for each physical device, this function is first called with a NULL
            // pQueueFamilyProperties pointer in order to get the count
            if (UNCALLED == physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState) {
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_MISSING_QUERY_COUNT, "DL",
                                     "Call sequence has vkGetPhysicalDeviceQueueFamilyProperties() w/ non-NULL "
                                     "pQueueFamilyProperties. You should first call vkGetPhysicalDeviceQueueFamilyProperties() w/ "
                                     "NULL pQueueFamilyProperties to query pCount.");
            }
            // Then verify that the pCount passed in on the second call matches what was returned
            if (physical_device_state->queueFamilyPropertiesCount != *pCount) {
                // TODO: this is not a requirement of the Valid Usage section for vkGetPhysicalDeviceQueueFamilyProperties,
                // so provide it as a warning
                skip_call |= log_msg(instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT,
                                     VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0, __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                                     "Call to vkGetPhysicalDeviceQueueFamilyProperties() w/ pCount value %u, but actual count "
                                     "supported by this physicalDevice is %u.",
                                     *pCount, physical_device_state->queueFamilyPropertiesCount);
            }
            physical_device_state->vkGetPhysicalDeviceQueueFamilyPropertiesState = QUERY_DETAILS;
        }
        if (skip_call) {
            return;
        }
        instance_data->dispatch_table.GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, pQueueFamilyProperties);
        if (!pQueueFamilyProperties) {
            physical_device_state->queueFamilyPropertiesCount = *pCount;
        } else { // Save queue family properties
            if (physical_device_state->queue_family_properties.size() < *pCount)
                physical_device_state->queue_family_properties.resize(*pCount);
            for (uint32_t i = 0; i < *pCount; i++) {
                physical_device_state->queue_family_properties[i] = pQueueFamilyProperties[i];
            }
        }
    } else {
        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, 0,
                __LINE__, VALIDATION_ERROR_00028, "DL",
                "Invalid physicalDevice (0x%p) passed into vkGetPhysicalDeviceQueueFamilyProperties(). %s", physicalDevice,
                validation_error_map[VALIDATION_ERROR_00028]);
    }
}

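// The same two-call idiom applies here (illustrative comment only; `gpu` is a
// hypothetical valid VkPhysicalDevice):
//
//     uint32_t family_count = 0;
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, nullptr);
//     std::vector<VkQueueFamilyProperties> families(family_count);
//     vkGetPhysicalDeviceQueueFamilyProperties(gpu, &family_count, families.data());
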
template <typename TCreateInfo, typename FPtr>
static VkResult CreateSurface(VkInstance instance, TCreateInfo const *pCreateInfo, VkAllocationCallbacks const *pAllocator,
                              VkSurfaceKHR *pSurface, FPtr fptr) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);

    // Call down the call chain:
    VkResult result = (instance_data->dispatch_table.*fptr)(instance, pCreateInfo, pAllocator, pSurface);

    if (result == VK_SUCCESS) {
        std::unique_lock<std::mutex> lock(global_lock);
        instance_data->surface_map[*pSurface] = SURFACE_STATE(*pSurface);
        lock.unlock();
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroySurfaceKHR(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks *pAllocator) {
    bool skip_call = false;
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = getSurfaceState(instance_data, surface);

    if (surface_state) {
        // TODO: track swapchains created from this surface.
        instance_data->surface_map.erase(surface);
    }
    lock.unlock();

    if (!skip_call) {
        // Call down the call chain:
        instance_data->dispatch_table.DestroySurfaceKHR(instance, surface, pAllocator);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDisplayPlaneSurfaceKHR(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateDisplayPlaneSurfaceKHR);
}

#ifdef VK_USE_PLATFORM_ANDROID_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateAndroidSurfaceKHR(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateAndroidSurfaceKHR);
}
#endif // VK_USE_PLATFORM_ANDROID_KHR

#ifdef VK_USE_PLATFORM_MIR_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateMirSurfaceKHR(VkInstance instance, const VkMirSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateMirSurfaceKHR);
}
#endif // VK_USE_PLATFORM_MIR_KHR

#ifdef VK_USE_PLATFORM_WAYLAND_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWaylandSurfaceKHR(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR *pCreateInfo,
                                                       const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWaylandSurfaceKHR);
}
#endif // VK_USE_PLATFORM_WAYLAND_KHR

#ifdef VK_USE_PLATFORM_WIN32_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateWin32SurfaceKHR(VkInstance instance, const VkWin32SurfaceCreateInfoKHR *pCreateInfo,
                                                     const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateWin32SurfaceKHR);
}
#endif // VK_USE_PLATFORM_WIN32_KHR

#ifdef VK_USE_PLATFORM_XCB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXcbSurfaceKHR(VkInstance instance, const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                                   const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXcbSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XCB_KHR

#ifdef VK_USE_PLATFORM_XLIB_KHR
VKAPI_ATTR VkResult VKAPI_CALL CreateXlibSurfaceKHR(VkInstance instance, const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                                    const VkAllocationCallbacks *pAllocator, VkSurfaceKHR *pSurface) {
    return CreateSurface(instance, pCreateInfo, pAllocator, pSurface, &VkLayerInstanceDispatchTable::CreateXlibSurfaceKHR);
}
#endif // VK_USE_PLATFORM_XLIB_KHR

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       VkSurfaceCapabilitiesKHR *pSurfaceCapabilities) {
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    lock.unlock();

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
                                                                                        pSurfaceCapabilities);

    if (result == VK_SUCCESS) {
        physical_device_state->vkGetPhysicalDeviceSurfaceCapabilitiesKHRState = QUERY_DETAILS;
        physical_device_state->surfaceCapabilities = *pSurfaceCapabilities;
    }

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex,
                                                                  VkSurfaceKHR surface, VkBool32 *pSupported) {
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto surface_state = getSurfaceState(instance_data, surface);
    lock.unlock();

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceSupportKHR(physicalDevice, queueFamilyIndex, surface,
                                                                                   pSupported);

    if (result == VK_SUCCESS) {
        surface_state->gpu_queue_support[{physicalDevice, queueFamilyIndex}] = (*pSupported != 0);
    }

    return result;
}

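// Typical application-side query that feeds this bookkeeping (illustrative comment only;
// `gpu`, `surface`, and `family_count` are hypothetical values obtained elsewhere):
//
//     uint32_t present_family = UINT32_MAX;
//     for (uint32_t i = 0; i < family_count; ++i) {
//         VkBool32 supported = VK_FALSE;
//         vkGetPhysicalDeviceSurfaceSupportKHR(gpu, i, surface, &supported);
//         if (supported) { present_family = i; break; } // first present-capable family
//     }
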
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfacePresentModesKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                       uint32_t *pPresentModeCount,
                                                                       VkPresentModeKHR *pPresentModes) {
    bool skip_call = false;
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // TODO: this isn't quite right. Available modes may differ by surface AND physical device.
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfacePresentModesKHRState;

    if (pPresentModes) {
        // Compare the preliminary value of *pPresentModeCount with the value this time:
        auto prev_mode_count = (uint32_t)physical_device_state->present_modes.size();
        switch (call_state) {
        case UNCALLED:
            skip_call |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfacePresentModesKHR() called with non-NULL pPresentModes; but no prior positive "
                "value has been seen for pPresentModeCount.");
            break;
        default:
            // Covers both query count and query details
            if (*pPresentModeCount != prev_mode_count) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                    "vkGetPhysicalDeviceSurfacePresentModesKHR() called with *pPresentModeCount (%u) that differs from the value "
                    "(%u) that was returned when pPresentModes was NULL.",
                    *pPresentModeCount, prev_mode_count);
            }
            break;
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfacePresentModesKHR(physicalDevice, surface,
                                                                                        pPresentModeCount, pPresentModes);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pPresentModeCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pPresentModeCount > physical_device_state->present_modes.size())
                physical_device_state->present_modes.resize(*pPresentModeCount);
        }
        if (pPresentModes) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pPresentModeCount; i++) {
                physical_device_state->present_modes[i] = pPresentModes[i];
            }
        }
    }

    return result;
}

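// Two-call idiom for present modes (illustrative comment only; `gpu` and `surface` are
// hypothetical handles). Only VK_PRESENT_MODE_FIFO_KHR is guaranteed to be available, so
// applications typically fall back to it:
//
//     uint32_t mode_count = 0;
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, nullptr);
//     std::vector<VkPresentModeKHR> modes(mode_count);
//     vkGetPhysicalDeviceSurfacePresentModesKHR(gpu, surface, &mode_count, modes.data());
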
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface,
                                                                  uint32_t *pSurfaceFormatCount,
                                                                  VkSurfaceFormatKHR *pSurfaceFormats) {
    bool skip_call = false;
    auto instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    auto physical_device_state = getPhysicalDeviceState(instance_data, physicalDevice);
    auto &call_state = physical_device_state->vkGetPhysicalDeviceSurfaceFormatsKHRState;

    if (pSurfaceFormats) {
        auto prev_format_count = (uint32_t)physical_device_state->surface_formats.size();

        switch (call_state) {
        case UNCALLED:
            // Since we haven't recorded a preliminary value of *pSurfaceFormatCount, that likely means that the
            // application didn't previously call this function with a NULL value of pSurfaceFormats:
            skip_call |= log_msg(
                instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_MUST_QUERY_COUNT, "DL",
                "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats; but no prior positive "
                "value has been seen for pSurfaceFormatCount.");
            break;
        default:
            if (prev_format_count != *pSurfaceFormatCount) {
                skip_call |= log_msg(
                    instance_data->report_data, VK_DEBUG_REPORT_WARNING_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT,
                    reinterpret_cast<uint64_t>(physicalDevice), __LINE__, DEVLIMITS_COUNT_MISMATCH, "DL",
                    "vkGetPhysicalDeviceSurfaceFormatsKHR() called with non-NULL pSurfaceFormats, and with pSurfaceFormatCount "
                    "set to a value (%u) that is different from the value (%u) that was returned when pSurfaceFormats was NULL.",
                    *pSurfaceFormatCount, prev_format_count);
            }
            break;
        }
    }
    lock.unlock();

    if (skip_call)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    // Call down the call chain:
    auto result = instance_data->dispatch_table.GetPhysicalDeviceSurfaceFormatsKHR(physicalDevice, surface, pSurfaceFormatCount,
                                                                                   pSurfaceFormats);

    if (result == VK_SUCCESS || result == VK_INCOMPLETE) {
        lock.lock();

        if (*pSurfaceFormatCount) {
            if (call_state < QUERY_COUNT) call_state = QUERY_COUNT;
            if (*pSurfaceFormatCount > physical_device_state->surface_formats.size())
                physical_device_state->surface_formats.resize(*pSurfaceFormatCount);
        }
        if (pSurfaceFormats) {
            if (call_state < QUERY_DETAILS) call_state = QUERY_DETAILS;
            for (uint32_t i = 0; i < *pSurfaceFormatCount; i++) {
                physical_device_state->surface_formats[i] = pSurfaceFormats[i];
            }
        }
    }
    return result;
}

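// Matching application-side sketch (illustrative comment only; `gpu` and `surface` are
// hypothetical handles):
//
//     uint32_t format_count = 0;
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, nullptr);
//     std::vector<VkSurfaceFormatKHR> formats(format_count);
//     vkGetPhysicalDeviceSurfaceFormatsKHR(gpu, surface, &format_count, formats.data());
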
VKAPI_ATTR VkResult VKAPI_CALL
CreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                             const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    VkResult res = instance_data->dispatch_table.CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
    if (VK_SUCCESS == res) {
        std::lock_guard<std::mutex> lock(global_lock);
        res = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return res;
}

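// How an application typically reaches this entry point (illustrative comment only;
// `instance` and `MyDebugCallback` are hypothetical). The extension function must be
// fetched through vkGetInstanceProcAddr:
//
//     VkDebugReportCallbackCreateInfoEXT info = {};
//     info.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
//     info.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
//     info.pfnCallback = MyDebugCallback; // a PFN_vkDebugReportCallbackEXT
//     auto create = reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
//         vkGetInstanceProcAddr(instance, "vkCreateDebugReportCallbackEXT"));
//     VkDebugReportCallbackEXT callback = VK_NULL_HANDLE;
//     if (create) create(instance, &info, nullptr, &callback);
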
VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    std::lock_guard<std::mutex> lock(global_lock);
    layer_destroy_msg_callback(instance_data->report_data, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                      size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    instance_data->dispatch_table.DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &global_layer, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL
EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, global_layer.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    assert(physicalDevice);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

static PFN_vkVoidFunction intercept_core_instance_command(const char *name);

static PFN_vkVoidFunction intercept_core_device_command(const char *name);

static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev);

static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice dev, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_device_command(funcName);
    if (proc)
        return proc;

    assert(dev);

    proc = intercept_khr_swapchain_command(funcName, dev);
    if (proc)
        return proc;

    layer_data *dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);

    auto &table = dev_data->dispatch_table;
    if (!table.GetDeviceProcAddr)
        return nullptr;
    return table.GetDeviceProcAddr(dev, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction proc = intercept_core_instance_command(funcName);
    if (!proc)
        proc = intercept_core_device_command(funcName);
    if (!proc)
        proc = intercept_khr_swapchain_command(funcName, VK_NULL_HANDLE);
    if (!proc)
        proc = intercept_khr_surface_command(funcName, instance);
    if (proc)
        return proc;

    assert(instance);

    instance_layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    proc = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (proc)
        return proc;

    auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr)
        return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

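// From the application's point of view these two functions sit behind the loader's
// trampoline; a call like the following (illustrative only, `device` hypothetical) ends
// up in GetDeviceProcAddr above, which either returns an intercepted entry point or
// forwards down the chain:
//
//     auto pfn_ani = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
//         vkGetDeviceProcAddr(device, "vkAcquireNextImageKHR"));
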
static PFN_vkVoidFunction intercept_core_instance_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_instance_commands[] = {
        { "vkGetInstanceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetInstanceProcAddr) },
        { "vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr) },
        { "vkCreateInstance", reinterpret_cast<PFN_vkVoidFunction>(CreateInstance) },
        { "vkCreateDevice", reinterpret_cast<PFN_vkVoidFunction>(CreateDevice) },
        { "vkEnumeratePhysicalDevices", reinterpret_cast<PFN_vkVoidFunction>(EnumeratePhysicalDevices) },
        { "vkGetPhysicalDeviceQueueFamilyProperties", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceQueueFamilyProperties) },
        { "vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance) },
        { "vkEnumerateInstanceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceLayerProperties) },
        { "vkEnumerateDeviceLayerProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceLayerProperties) },
        { "vkEnumerateInstanceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateInstanceExtensionProperties) },
        { "vkEnumerateDeviceExtensionProperties", reinterpret_cast<PFN_vkVoidFunction>(EnumerateDeviceExtensionProperties) },
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_instance_commands); i++) {
        if (!strcmp(core_instance_commands[i].name, name))
            return core_instance_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction intercept_core_device_command(const char *name) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } core_device_commands[] = {
        {"vkGetDeviceProcAddr", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceProcAddr)},
        {"vkQueueSubmit", reinterpret_cast<PFN_vkVoidFunction>(QueueSubmit)},
        {"vkWaitForFences", reinterpret_cast<PFN_vkVoidFunction>(WaitForFences)},
        {"vkGetFenceStatus", reinterpret_cast<PFN_vkVoidFunction>(GetFenceStatus)},
        {"vkQueueWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(QueueWaitIdle)},
        {"vkDeviceWaitIdle", reinterpret_cast<PFN_vkVoidFunction>(DeviceWaitIdle)},
        {"vkGetDeviceQueue", reinterpret_cast<PFN_vkVoidFunction>(GetDeviceQueue)},
        {"vkDestroyInstance", reinterpret_cast<PFN_vkVoidFunction>(DestroyInstance)},
        {"vkDestroyDevice", reinterpret_cast<PFN_vkVoidFunction>(DestroyDevice)},
        {"vkDestroyFence", reinterpret_cast<PFN_vkVoidFunction>(DestroyFence)},
        {"vkResetFences", reinterpret_cast<PFN_vkVoidFunction>(ResetFences)},
        {"vkDestroySemaphore", reinterpret_cast<PFN_vkVoidFunction>(DestroySemaphore)},
        {"vkDestroyEvent", reinterpret_cast<PFN_vkVoidFunction>(DestroyEvent)},
        {"vkDestroyQueryPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyQueryPool)},
        {"vkDestroyBuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyBuffer)},
        {"vkDestroyBufferView", reinterpret_cast<PFN_vkVoidFunction>(DestroyBufferView)},
        {"vkDestroyImage", reinterpret_cast<PFN_vkVoidFunction>(DestroyImage)},
        {"vkDestroyImageView", reinterpret_cast<PFN_vkVoidFunction>(DestroyImageView)},
        {"vkDestroyShaderModule", reinterpret_cast<PFN_vkVoidFunction>(DestroyShaderModule)},
        {"vkDestroyPipeline", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipeline)},
        {"vkDestroyPipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineLayout)},
        {"vkDestroySampler", reinterpret_cast<PFN_vkVoidFunction>(DestroySampler)},
        {"vkDestroyDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorSetLayout)},
        {"vkDestroyDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyDescriptorPool)},
        {"vkDestroyFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(DestroyFramebuffer)},
        {"vkDestroyRenderPass", reinterpret_cast<PFN_vkVoidFunction>(DestroyRenderPass)},
        {"vkCreateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateBuffer)},
        {"vkCreateBufferView", reinterpret_cast<PFN_vkVoidFunction>(CreateBufferView)},
        {"vkCreateImage", reinterpret_cast<PFN_vkVoidFunction>(CreateImage)},
        {"vkCreateImageView", reinterpret_cast<PFN_vkVoidFunction>(CreateImageView)},
        {"vkCreateFence", reinterpret_cast<PFN_vkVoidFunction>(CreateFence)},
        {"vkCreatePipelineCache", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineCache)},
        {"vkDestroyPipelineCache", reinterpret_cast<PFN_vkVoidFunction>(DestroyPipelineCache)},
        {"vkGetPipelineCacheData", reinterpret_cast<PFN_vkVoidFunction>(GetPipelineCacheData)},
        {"vkMergePipelineCaches", reinterpret_cast<PFN_vkVoidFunction>(MergePipelineCaches)},
        {"vkCreateGraphicsPipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateGraphicsPipelines)},
        {"vkCreateComputePipelines", reinterpret_cast<PFN_vkVoidFunction>(CreateComputePipelines)},
        {"vkCreateSampler", reinterpret_cast<PFN_vkVoidFunction>(CreateSampler)},
        {"vkCreateDescriptorSetLayout", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorSetLayout)},
        {"vkCreatePipelineLayout", reinterpret_cast<PFN_vkVoidFunction>(CreatePipelineLayout)},
        {"vkCreateDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(CreateDescriptorPool)},
        {"vkResetDescriptorPool", reinterpret_cast<PFN_vkVoidFunction>(ResetDescriptorPool)},
        {"vkAllocateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(AllocateDescriptorSets)},
        {"vkFreeDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(FreeDescriptorSets)},
        {"vkUpdateDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(UpdateDescriptorSets)},
        {"vkCreateCommandPool", reinterpret_cast<PFN_vkVoidFunction>(CreateCommandPool)},
        {"vkDestroyCommandPool", reinterpret_cast<PFN_vkVoidFunction>(DestroyCommandPool)},
        {"vkResetCommandPool", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandPool)},
        {"vkCreateQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CreateQueryPool)},
        {"vkAllocateCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(AllocateCommandBuffers)},
        {"vkFreeCommandBuffers", reinterpret_cast<PFN_vkVoidFunction>(FreeCommandBuffers)},
        {"vkBeginCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(BeginCommandBuffer)},
        {"vkEndCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(EndCommandBuffer)},
        {"vkResetCommandBuffer", reinterpret_cast<PFN_vkVoidFunction>(ResetCommandBuffer)},
        {"vkCmdBindPipeline", reinterpret_cast<PFN_vkVoidFunction>(CmdBindPipeline)},
        {"vkCmdSetViewport", reinterpret_cast<PFN_vkVoidFunction>(CmdSetViewport)},
        {"vkCmdSetScissor", reinterpret_cast<PFN_vkVoidFunction>(CmdSetScissor)},
        {"vkCmdSetLineWidth", reinterpret_cast<PFN_vkVoidFunction>(CmdSetLineWidth)},
        {"vkCmdSetDepthBias", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBias)},
        {"vkCmdSetBlendConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdSetBlendConstants)},
        {"vkCmdSetDepthBounds", reinterpret_cast<PFN_vkVoidFunction>(CmdSetDepthBounds)},
        {"vkCmdSetStencilCompareMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilCompareMask)},
        {"vkCmdSetStencilWriteMask", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilWriteMask)},
        {"vkCmdSetStencilReference", reinterpret_cast<PFN_vkVoidFunction>(CmdSetStencilReference)},
        {"vkCmdBindDescriptorSets", reinterpret_cast<PFN_vkVoidFunction>(CmdBindDescriptorSets)},
        {"vkCmdBindVertexBuffers", reinterpret_cast<PFN_vkVoidFunction>(CmdBindVertexBuffers)},
        {"vkCmdBindIndexBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdBindIndexBuffer)},
        {"vkCmdDraw", reinterpret_cast<PFN_vkVoidFunction>(CmdDraw)},
        {"vkCmdDrawIndexed", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexed)},
        {"vkCmdDrawIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndirect)},
        {"vkCmdDrawIndexedIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDrawIndexedIndirect)},
        {"vkCmdDispatch", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatch)},
        {"vkCmdDispatchIndirect", reinterpret_cast<PFN_vkVoidFunction>(CmdDispatchIndirect)},
        {"vkCmdCopyBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBuffer)},
        {"vkCmdCopyImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImage)},
        {"vkCmdBlitImage", reinterpret_cast<PFN_vkVoidFunction>(CmdBlitImage)},
        {"vkCmdCopyBufferToImage", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyBufferToImage)},
        {"vkCmdCopyImageToBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyImageToBuffer)},
        {"vkCmdUpdateBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdUpdateBuffer)},
        {"vkCmdFillBuffer", reinterpret_cast<PFN_vkVoidFunction>(CmdFillBuffer)},
        {"vkCmdClearColorImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearColorImage)},
        {"vkCmdClearDepthStencilImage", reinterpret_cast<PFN_vkVoidFunction>(CmdClearDepthStencilImage)},
        {"vkCmdClearAttachments", reinterpret_cast<PFN_vkVoidFunction>(CmdClearAttachments)},
        {"vkCmdResolveImage", reinterpret_cast<PFN_vkVoidFunction>(CmdResolveImage)},
        {"vkCmdSetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdSetEvent)},
        {"vkCmdResetEvent", reinterpret_cast<PFN_vkVoidFunction>(CmdResetEvent)},
        {"vkCmdWaitEvents", reinterpret_cast<PFN_vkVoidFunction>(CmdWaitEvents)},
        {"vkCmdPipelineBarrier", reinterpret_cast<PFN_vkVoidFunction>(CmdPipelineBarrier)},
        {"vkCmdBeginQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginQuery)},
        {"vkCmdEndQuery", reinterpret_cast<PFN_vkVoidFunction>(CmdEndQuery)},
        {"vkCmdResetQueryPool", reinterpret_cast<PFN_vkVoidFunction>(CmdResetQueryPool)},
        {"vkCmdCopyQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(CmdCopyQueryPoolResults)},
        {"vkCmdPushConstants", reinterpret_cast<PFN_vkVoidFunction>(CmdPushConstants)},
        {"vkCmdWriteTimestamp", reinterpret_cast<PFN_vkVoidFunction>(CmdWriteTimestamp)},
        {"vkCreateFramebuffer", reinterpret_cast<PFN_vkVoidFunction>(CreateFramebuffer)},
        {"vkCreateShaderModule", reinterpret_cast<PFN_vkVoidFunction>(CreateShaderModule)},
        {"vkCreateRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CreateRenderPass)},
        {"vkCmdBeginRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdBeginRenderPass)},
        {"vkCmdNextSubpass", reinterpret_cast<PFN_vkVoidFunction>(CmdNextSubpass)},
        {"vkCmdEndRenderPass", reinterpret_cast<PFN_vkVoidFunction>(CmdEndRenderPass)},
        {"vkCmdExecuteCommands", reinterpret_cast<PFN_vkVoidFunction>(CmdExecuteCommands)},
        {"vkSetEvent", reinterpret_cast<PFN_vkVoidFunction>(SetEvent)},
        {"vkMapMemory", reinterpret_cast<PFN_vkVoidFunction>(MapMemory)},
        {"vkUnmapMemory", reinterpret_cast<PFN_vkVoidFunction>(UnmapMemory)},
        {"vkFlushMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(FlushMappedMemoryRanges)},
        {"vkInvalidateMappedMemoryRanges", reinterpret_cast<PFN_vkVoidFunction>(InvalidateMappedMemoryRanges)},
        {"vkAllocateMemory", reinterpret_cast<PFN_vkVoidFunction>(AllocateMemory)},
        {"vkFreeMemory", reinterpret_cast<PFN_vkVoidFunction>(FreeMemory)},
        {"vkBindBufferMemory", reinterpret_cast<PFN_vkVoidFunction>(BindBufferMemory)},
        {"vkGetBufferMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetBufferMemoryRequirements)},
        {"vkGetImageMemoryRequirements", reinterpret_cast<PFN_vkVoidFunction>(GetImageMemoryRequirements)},
        {"vkGetQueryPoolResults", reinterpret_cast<PFN_vkVoidFunction>(GetQueryPoolResults)},
        {"vkBindImageMemory", reinterpret_cast<PFN_vkVoidFunction>(BindImageMemory)},
        {"vkQueueBindSparse", reinterpret_cast<PFN_vkVoidFunction>(QueueBindSparse)},
        {"vkCreateSemaphore", reinterpret_cast<PFN_vkVoidFunction>(CreateSemaphore)},
        {"vkCreateEvent", reinterpret_cast<PFN_vkVoidFunction>(CreateEvent)},
    };

    for (size_t i = 0; i < ARRAY_SIZE(core_device_commands); i++) {
        if (!strcmp(core_device_commands[i].name, name))
            return core_device_commands[i].proc;
    }

    return nullptr;
}

static PFN_vkVoidFunction intercept_khr_swapchain_command(const char *name, VkDevice dev) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
    } khr_swapchain_commands[] = {
        { "vkCreateSwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateSwapchainKHR) },
        { "vkDestroySwapchainKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySwapchainKHR) },
        { "vkGetSwapchainImagesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetSwapchainImagesKHR) },
        { "vkAcquireNextImageKHR", reinterpret_cast<PFN_vkVoidFunction>(AcquireNextImageKHR) },
        { "vkQueuePresentKHR", reinterpret_cast<PFN_vkVoidFunction>(QueuePresentKHR) },
    };
    layer_data *dev_data = nullptr;

    if (dev) {
        dev_data = get_my_data_ptr(get_dispatch_key(dev), layer_data_map);
        if (!dev_data->device_extensions.wsi_enabled)
            return nullptr;
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_swapchain_commands); i++) {
        if (!strcmp(khr_swapchain_commands[i].name, name))
            return khr_swapchain_commands[i].proc;
    }

    if (dev_data) {
        if (!dev_data->device_extensions.wsi_display_swapchain_enabled)
            return nullptr;
    }

    if (!strcmp("vkCreateSharedSwapchainsKHR", name))
        return reinterpret_cast<PFN_vkVoidFunction>(CreateSharedSwapchainsKHR);

    return nullptr;
}

static PFN_vkVoidFunction intercept_khr_surface_command(const char *name, VkInstance instance) {
    static const struct {
        const char *name;
        PFN_vkVoidFunction proc;
        bool instance_layer_data::*enable;
    } khr_surface_commands[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        {"vkCreateAndroidSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateAndroidSurfaceKHR),
         &instance_layer_data::androidSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
        {"vkCreateMirSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateMirSurfaceKHR),
         &instance_layer_data::mirSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        {"vkCreateWaylandSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWaylandSurfaceKHR),
         &instance_layer_data::waylandSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
        {"vkCreateWin32SurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateWin32SurfaceKHR),
         &instance_layer_data::win32SurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
        {"vkCreateXcbSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXcbSurfaceKHR),
         &instance_layer_data::xcbSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateXlibSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateXlibSurfaceKHR),
         &instance_layer_data::xlibSurfaceExtensionEnabled},
#endif // VK_USE_PLATFORM_XLIB_KHR
        {"vkCreateDisplayPlaneSurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(CreateDisplayPlaneSurfaceKHR),
         &instance_layer_data::displayExtensionEnabled},
        {"vkDestroySurfaceKHR", reinterpret_cast<PFN_vkVoidFunction>(DestroySurfaceKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceCapabilitiesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceCapabilitiesKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceSupportKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceSupportKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfacePresentModesKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfacePresentModesKHR),
         &instance_layer_data::surfaceExtensionEnabled},
        {"vkGetPhysicalDeviceSurfaceFormatsKHR", reinterpret_cast<PFN_vkVoidFunction>(GetPhysicalDeviceSurfaceFormatsKHR),
         &instance_layer_data::surfaceExtensionEnabled},
    };

    instance_layer_data *instance_data = nullptr;
    if (instance) {
        instance_data = get_my_data_ptr(get_dispatch_key(instance), instance_layer_data_map);
    }

    for (size_t i = 0; i < ARRAY_SIZE(khr_surface_commands); i++) {
        if (!strcmp(khr_surface_commands[i].name, name)) {
            if (instance_data && !(instance_data->*(khr_surface_commands[i].enable)))
                return nullptr;
            return khr_surface_commands[i].proc;
        }
    }

    return nullptr;
}

} // namespace core_validation

// vk_layer_logging.h expects these to be defined

VKAPI_ATTR VkResult VKAPI_CALL
vkCreateDebugReportCallbackEXT(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                               const VkAllocationCallbacks *pAllocator, VkDebugReportCallbackEXT *pMsgCallback) {
    return core_validation::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL
vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                const VkAllocationCallbacks *pAllocator) {
    core_validation::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL
vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objType, uint64_t object,
                        size_t location, int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    core_validation::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// loader-layer interface v0, just wrappers since there is only a layer

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, VkExtensionProperties *pProperties) {
    return core_validation::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return core_validation::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, VkLayerProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // the layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return core_validation::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return core_validation::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return core_validation::GetInstanceProcAddr(instance, funcName);
}